Skip to content

Commit

Permalink
Merge branch 'master' of github.com:STAMP-project/camp
Browse files Browse the repository at this point in the history
  • Loading branch information
brice-morin committed Sep 24, 2019
2 parents dc7a30f + 9522bb1 commit dd33d1f
Show file tree
Hide file tree
Showing 298 changed files with 2,784,544 additions and 211 deletions.
2 changes: 1 addition & 1 deletion camp/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ class About:

PROGRAM = "CAMP"

VERSION = "0.6.0"
VERSION = "0.6.1"

COMMIT_HASH = None

Expand Down
9 changes: 8 additions & 1 deletion camp/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,8 +66,15 @@ def generate(self, arguments):
try:
model = self._load_model()
configurations = self._generate_configurations(arguments, model)

count = 0
for index, each_configuration in enumerate(configurations, 1):
self._save(index, each_configuration)
count += 1

if count == 0:
self._ui.no_configuration_generated()


except InvalidYAMLModel as error:
self._ui.invalid_yaml_model(error)
Expand Down Expand Up @@ -108,8 +115,8 @@ def _generate_configurations(self, arguments, model):


def _save(self, index, configuration):
self._output.save_as_graphviz(index, configuration)
yaml_file = self._output.save_as_yaml(index, configuration)
self._output.save_as_graphviz(index, configuration)
self._ui.new_configuration(index, configuration, yaml_file)


Expand Down
7 changes: 7 additions & 0 deletions camp/data/metamodel.yml
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,13 @@
type: Partner
multiple: true
mandatory: true

# Holds the transitive closure of all underlying feature providers
- name: stack
type: CInstance
mandatory: true
multiple: true

- name: use_feature
type: CInstance
- name: configuration
Expand Down
112 changes: 83 additions & 29 deletions camp/generate.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,7 @@ def _cover(self):

def _solve(self):
z3_solution = cast_all_objects(self._solver.model())
#import pprint; pprint.pprint(z3_solution)

self._solver.add(self._context.evaluate(self._as_constraint(z3_solution)))
return self._extract_from(z3_solution)
Expand Down Expand Up @@ -146,6 +147,7 @@ def _extract_from(self, z3_solution):
if "use_feature" in item and item["use_feature"]:
provider = result.resolve(item["use_feature"])
instance.feature_provider = provider

if "partners" in item:
providers = [result.resolve(z3_solution[each]["endpoint"]) \
for each in item["partners"]]
Expand Down Expand Up @@ -399,8 +401,8 @@ def coverage_gain(self):


INTEGRITY_VARIABLES = [
("CInstance", ["ci", "ci1", "ci2", "spi"]),
("Feature", ["fr", "fp"]),
("CInstance", ["ci", "ci1", "ci2", "ci3", "ci4", "ci5", "spi"]),
("Feature", ["fr", "fp", "f1", "f2", "f3"]),
("Partner", ["partner"]),
("Service", ["service", "sr", "sp"]),
("Variable", ["var"]),
Expand All @@ -415,30 +417,102 @@ def coverage_gain(self):
CInstance.all_instances().count() > 0
""",

# Cannot be deployed on itself
# -----
# DEFINITION of CInstance::stack

# No feature provider, no stack
"""
CInstance.forall(ci1,
Implies(
ci1.use_feature.undefined(),
ci1.stack.count() == 0))
""",


# The feature provider must be in the stack
"""
CInstance.forall(ci1,
Implies(
Not(ci1.use_feature.undefined()),
ci1.stack.exists(ci2, ci2 == ci1.use_feature)))
""",


# Stack Correctness: Any element in the stack is either the
# underlying feature provider or somewhere further down the stack
"""
CInstance.forall(ci1,
ci1.stack.forall(ci2,
Or(ci1.use_feature == ci2,
ci1.use_feature.stack.exists(ci3, ci3 == ci2))))
""",


# Stack Completeness: Every element in my stack is also in the stack of
# the element above me in the stack
"""
CInstance.forall(ci1,
CInstance.forall(ci2,
Implies(
ci2.use_feature == ci1,
And(
ci2.stack.exists(ci3, ci3 == ci1),
ci1.stack.forall(ci4,
ci2.stack.exists(ci5, ci5 == ci4))))))
""",


# No cycle in the deployment structure
"""
CInstance.forall(ci, Not(ci["use_feature"] == ci))
CInstance.forall(ci1,
Not(ci1.stack.exists(ci2, ci2 == ci1)))
""",


# Service bindings

# An instance cannot use its own services
"""
CInstance.forall(ci, Not(ci.partners.exists(
partner, partner.endpoint == ci)))
""",

# Can only deploy on something that provides the required features

# STACK CONSTRUCTION THROUGH FEATURES

# Instances that do not require features cannot have a feature_provider
"""
CInstance.forall(ci,
Implies(
ci.definition.require_features.count() == 0,
ci.use_feature.undefined()))
""",


# Instances that do require features must have one feature_provider that
# provides all the required features
"""
CInstance.forall(ci, ci["definition"]["require_features"].forall(
fr, ci.use_feature["definition"].provide_features.exists(fp, fp == fr)))
CInstance.forall(ci1,
ci1.definition.require_features.forall(f1,
CInstance.exists(ci2,
And(
ci2 == ci1.use_feature,
Or(
ci2.definition.provide_features.exists(f2, f2 == f1),
ci2.stack.exists(ci3,
ci3.definition.provide_features.exists(f3, f3 == f1)))))))
""",

# All partner shall connect to an endpoint that provides the requested service

# All partners shall connect to an endpoint that provides the requested
# service
"""
Partner.forall(partner,
partner.endpoint.definition.provide_services.exists(service,
service == partner.service))
""",


# Instances that do not require services cannot have any
# service provider
"""
Expand All @@ -447,34 +521,14 @@ def coverage_gain(self):
ci["partners"].count() == 0))
""",

# Instances that do not require features cannot have a
# feature_provider
"""
CInstance.forall(ci, Implies(ci["definition"]["require_features"].count() == 0,
ci["use_feature"].undefined()))
""",

# Instances that do require features must have one
# feature_provider
"""
CInstance.forall(ci, Implies(ci["definition"]["require_features"].count() > 0,
Not(ci["use_feature"].undefined())))
""",

# All provided features must be used
"""
CInstance.forall(ci1,
Implies(ci1.definition.provide_features.count() > 0,
CInstance.exists(ci2, ci2.use_feature == ci1)))
""",

# Only one pending service
"""
CInstance.filter(ci1,
And([ci1.definition.provide_services.count() > 0,
CInstance.forall(ci2, ci2.partners.forall(partner,
partner.endpoint != ci1))])).count() == 1
"""
""",

# No pending instances
# """
Expand Down
4 changes: 4 additions & 0 deletions camp/ui.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,10 @@ def new_configuration(self, index, configuration, path):
self._summarize(configuration)


def no_configuration_generated(self):
self._print("\nError: No configuration generated! Is the model correct?")


def configurations_loaded(self, path):
self._print("Loading configurations from '{path}' ...", path=path)

Expand Down
11 changes: 11 additions & 0 deletions docs/pages/changelog.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,17 @@ changes that were made.

* CAMP v0.6

* CAMP v0.6.1 (Sep. 23, 2019)

* Fix [Issue
74](https://github.com/STAMP-project/camp/issues/74). Update
the integrity constraints that govern how CAMP stacks
components together. CAMP thus supports "side-by-side"
components inside software stacks.

* CAMP reports explicitly when it cannot find any configuration.


* CAMP v0.6.0 (Aug. 20, 2019)

* Add support for JMeter test execution, report
Expand Down
7 changes: 4 additions & 3 deletions docs/pages/execute.md
Original file line number Diff line number Diff line change
Expand Up @@ -343,11 +343,11 @@ need to explain how to run these tests. We thus elaborate on the
```
Note the new `tests` section that describes the command to be run,
that is `-n -t perftest_script/testman.jmx -l perftest_script/testman.csv -e -o `. as well as the expected format of the test
that is `-n -t perftest_script/testman.jmx -l perftest_script/testman-perftest-report/testman.jtl -e -o `. as well as the expected format of the test
reports, their location, and the extension used to detect them.
---
**Warning**: As per version 0.4, CAMP only supports JMeter/JSON test
**Warning**: As per version 0.6, CAMP only supports JMeter/JSON test
reports produced running JMeter in headless mode.
---
Expand Down Expand Up @@ -466,7 +466,7 @@ TOTAL 66 66 0 0
That's all folks!
```

The reported that the JMeter script made 22 samples (a sample is a single call to a url). No one of them returned an error. To inspect detailed reports for each configuration simply go to generated config folders and look for `index.html` file within the test-reports folder:
The test summary reported that the JMeter script made 22 samples (a sample is a single call to a url). No one of them returned an error. To inspect detailed reports for each configuration simply go to generated config folders and look for `index.html` file within the test-reports folder:

```console
$ tree
Expand All @@ -491,3 +491,4 @@ java-web
:
.
```
Moreover, .jtl files are available to be used, for instance, with Jenkins JMeter plugin (which looks for .jtl reports).
2 changes: 1 addition & 1 deletion samples/java-web/camp.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ components:
docker:
file: tests/Dockerfile
tests:
command: -n -t perftest_script/testman.jmx -l perftest_script/testman.csv -e -o perftest_script/testman-perftest-report
command: -n -t perftest_script/testman.jmx -l perftest_script/testman-perftest-report/testman.jtl -e -o perftest_script/testman-perftest-report
reports:
format: jmeter
location: perftest_script/testman-perftest-report
Expand Down
41 changes: 20 additions & 21 deletions samples/stamp/activeeon/traces/README.MD
Original file line number Diff line number Diff line change
Expand Up @@ -4,57 +4,56 @@ This repository demonstrates how to collect traces from different Proactive conf
`configs` folder contains **three generated Proactive configurations**. Each configuration provides a different dockerized Proactive version:

- **`Config0 (Proactive + HSQLDB)`:** A preconfigured Proactive version that is configured by default to connect to the default HSQLDB database. This is the baseline config used as reference.
- **`Config1 (Proactive + Postgres)`:** A preconfigured Proactive version that is configured to connect to the Postgres database.
- **`Config2 (Proactive + MySQL)`:** A preconfigured Proactive version that is configured to connect to the MySQL database.
- **`Config1 (Proactive + Postgres)`:** A preconfigured Proactive version that is configured to connect to the Postgres database.
- **`Config2 (Proactive + MySQL)`:** A preconfigured Proactive version that is configured to connect to the MySQL database.

The two latter configs are generated using CAMP and the first one is used as baseline or reference.

The `Dockerfile` for each configuration shows how to instrument Proactive to collect traces. The script `run.sh` is executed for every configuration. It executes the following tasks:
Since profiling basically takes a snapshot of the running app _n_ times per second (or at _n_ Hz), there is no guarantee it will collect all traces. To increase the quality of the traces, we run the profiler several times, at different frequencies. The selected frequencies are provided in demo.sh script: `FREQS=(15013 16103 17203 18301)`.

The `Dockerfile` for each configuration shows how to instrument Proactive to collect traces at different frequencies. The script `run.sh` is executed for every configuration. It executes the following tasks:
- Run the Proactive server `
/activeeon_enterprise-pca_server-linux-x64-8.5.0-SNAPSHOT/bin/proactive-server &`. The JVM is configured with the options `-XX:+PreserveFramePointer -agentpath:/liblagent.so`
- Get the corresponding `PID` of the Java process
- Attach the profiler to the Java process `java -cp /libperfagent.jar:$JAVA_HOME/lib/tools.jar net.virtualvoid.perf.AttachOnce $PID`
- Wait until the Proactive server is up `sleep 500`
- Attach the profiler to the Java process `java -cp /libperfagent.jar:$JAVA_HOME/lib/tools.jar net.virtualvoid.perf.AttachOnce $PID`
- Wait until the Proactive server is up `sleep 500`
- Run some tests/load using the proactive client. Here, we send and execute a `workflow-test.xml` in the scheduler `/activeeon_enterprise-pca_server-linux-x64-8.5.0-SNAPSHOT/bin/proactive-client -s /workflow-test.xml`
- We record traces for 300 seconds. The frequency is the default one `perf record -e cpu-clock -p $PID -a -g -o /data/perf.data -- sleep 300`
- We shutdown properly the scheduler using the proactive client.
- We copy traces to the mounted /data folder
> NB: The tested Proactive version being an enterprise version, we do not provide the distribution artifact but in case you need it you can contact [email protected].
>
An advanced tutorial about docker processes profiling is available here [https://github.com/STAMP-project/docker-traces-xp](https://github.com/STAMP-project/docker-traces-xp). You can check it to get more hints about the used commands.

>
An advanced tutorial about docker processes profiling is available here [https://github.com/STAMP-project/docker-traces-xp](https://github.com/STAMP-project/docker-traces-xp). You can check it to get more hints about the used commands.
# Results

Here are the results
```
---- Java traces ----
Configuration 0 contributed 2190/6255 = 35.00 % of all unique traces
Configuration 0 contributed 3258/8573 = 38.00 % of all unique traces
Configuration 1 contributed 2563/6255 = 40.00 % of all unique traces
Configuration 1 contributed 5916/8573 = 69.00 % of all unique traces
Configuration 2 contributed 2594/6255 = 41.00 % of all unique traces
Configuration 2 contributed 3515/8573 = 41.00 % of all unique traces
---- System traces ----
Configuration 0 contributed 1363/2569 = 53.00 % of all unique traces
Configuration 0 contributed 1438/3422 = 42.00 % of all unique traces
Configuration 1 contributed 1238/2569 = 48.00 % of all unique traces
Configuration 1 contributed 1883/3422 = 55.00 % of all unique traces
Configuration 2 contributed 1225/2569 = 47.00 % of all unique traces
Configuration 2 contributed 1575/3422 = 46.00 % of all unique traces
```
-**For Proactive + HSQLDB:**
It contributes to 35.00 % of all unique traces for Java traces and 53.00 % of all unique traces for System traces.
It contributes to 38.00 % of all unique traces for Java traces and 42.00 % of all unique traces for System traces.

-**For Proactive + Postgres:**
It contributes to 40.00 % of all unique traces for Java traces and 48.00 % of all unique traces for System traces.
It contributes to 69.00 % of all unique traces for Java traces and 55.00 % of all unique traces for System traces.

-**For Proactive + MySQL:**
It contributes to 41.00 % of all unique traces for Java traces and 47.00 % of all unique traces for System traces.
It contributes to 41.00 % of all unique traces for Java traces and 46.00 % of all unique traces for System traces.

The traces and raw data are available in the `profiling` folder of each config.

# Conclusion
The three configurations contributed differently to discover unique traces. For Java traces, the CAMP generated configs (Proactive + MySQL and Postgres) discovered more unique traces than the default HSQLDB Proactive configuration (the reference). This shows that the new generated CAMP configs allowed us to discover more JVM traces (which mean more code coverage). For System traces, we remark that the default Proactive config (the reference) uses more system calls than other generated configs.


# Conclusion
The three configurations contributed differently to discover unique traces. For Java traces, the CAMP generated configs (Proactive + MySQL and Postgres) discovered more unique traces than the default HSQLDB Proactive configuration (the reference). This shows that the new generated CAMP configs allowed us to discover more JVM traces (which means more code coverage). For System traces, we obtain the same result as for Java traces with different trace coverage, respectively 42, 55, and 46%. We also remark that PA + Postgres has the largest trace coverage over Java and system traces compared to the other configurations.
6 changes: 5 additions & 1 deletion samples/stamp/activeeon/traces/configs/config0/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,5 +1,9 @@
FROM openjdk:8

ARG PROFILER_FREQ

RUN echo "$PROFILER_FREQ" > /.profiler

RUN apt update

RUN apt-get install -y build-essential cmake wget unzip flex bison && rm -rf /var/lib/apt/lists/*
Expand All @@ -12,7 +16,7 @@ RUN cd /root/perf-map-agent && cp out/libperfmap.so /libperfmap.so && cp out/att

RUN cd /root && wget https://github.com/dcapwell/lightweight-java-profiler/archive/master.zip && unzip master.zip && rm -f master.zip

RUN cd /root/lightweight-java-profiler-master && make
RUN cd /root/lightweight-java-profiler-master && sed -i "s/static const int kNumInterrupts = 100;/static const int kNumInterrupts = $PROFILER_FREQ;/" src/globals.h && make

RUN cp /root/lightweight-java-profiler-master/build-64/liblagent.so /liblagent.so

Expand Down
3 changes: 2 additions & 1 deletion samples/stamp/activeeon/traces/configs/config0/run.sh
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,8 @@ sleep 500

/activeeon_enterprise-pca_server-linux-x64-8.5.0-SNAPSHOT/bin/proactive-client -s /workflow-test.xml

perf record -e cpu-clock -p $PID -a -g -o /data/perf.data -- sleep 300
PROFILER_FREQ=$(cat /.profiler)
perf record -e cpu-clock -F $PROFILER_FREQ -p $PID -a -g -o /data/perf.data -- sleep 300


cp /tmp/perf*.map /data/perf-$PID.map
Expand Down
Loading

0 comments on commit dd33d1f

Please sign in to comment.