Commit: adding configurable cbapi timeout and updating some tfvars (#148)
* updating some tfvars

* adding configurable carbon black timeout
ryandeivert authored Aug 20, 2019
1 parent ec7b4b0 commit 3877787
Showing 6 changed files with 37 additions and 13 deletions.
12 changes: 12 additions & 0 deletions cli/config.py
@@ -147,6 +147,18 @@ def carbon_black_url(self, value: str) -> None:
         )
         self._config['carbon_black_url'] = value
 
+    @property
+    def carbon_black_timeout(self) -> int:
+        return self._config['carbon_black_timeout']
+
+    @carbon_black_timeout.setter
+    def carbon_black_timeout(self, value: str) -> None:
+        try:
+            int_value = int(value)
+        except ValueError:
+            raise InvalidConfigError('carbon_black_timeout "{}" is not an integer'.format(value))
+        self._config['carbon_black_timeout'] = int_value
+
     @property
     def encrypted_carbon_black_api_token(self) -> str:
         return self._config['encrypted_carbon_black_api_token']
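The getter/setter pair above normalizes user-supplied strings to an integer before storing them, so downstream code can rely on the type. A minimal, self-contained sketch of the same pattern (DemoConfig is a hypothetical stand-in for the CLI's config class; only carbon_black_timeout and InvalidConfigError appear in the actual diff):

    class InvalidConfigError(Exception):
        """Raised when a config value fails validation."""

    class DemoConfig:
        """Hypothetical stand-in for the BinaryAlert CLI config object."""
        def __init__(self) -> None:
            self._config = {}

        @property
        def carbon_black_timeout(self) -> int:
            return self._config['carbon_black_timeout']

        @carbon_black_timeout.setter
        def carbon_black_timeout(self, value: str) -> None:
            try:
                int_value = int(value)
            except ValueError:
                raise InvalidConfigError(
                    'carbon_black_timeout "{}" is not an integer'.format(value))
            self._config['carbon_black_timeout'] = int_value

    config = DemoConfig()
    config.carbon_black_timeout = '120'   # accepted: stored as the int 120
    config.carbon_black_timeout = 'abc'   # raises InvalidConfigError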
5 changes: 4 additions & 1 deletion cli/manager.py
@@ -133,7 +133,10 @@ def cb_copy_all(self) -> None:
 
         print('Connecting to CarbonBlack server {} ...'.format(self._config.carbon_black_url))
         carbon_black = cbapi.CbResponseAPI(
-            url=self._config.carbon_black_url, token=self._config.plaintext_carbon_black_api_token)
+            url=self._config.carbon_black_url,
+            timeout=self._config.carbon_black_timeout,
+            token=self._config.plaintext_carbon_black_api_token
+        )
 
         self._enqueue(
             self._config.binaryalert_downloader_queue_name,
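For reference, the client the CLI builds here is equivalent to the following standalone call (the URL, timeout, and token values are placeholders rather than real config; assumes the cbapi package is installed):

    import cbapi

    # Placeholder values; in manager.py these come from self._config.
    carbon_black = cbapi.CbResponseAPI(
        url='https://cb.example.com',   # carbon_black_url
        timeout=60,                     # carbon_black_timeout, in seconds
        token='0123456789abcdef'        # plaintext_carbon_black_api_token
    )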
5 changes: 4 additions & 1 deletion lambda_functions/downloader/main.py
@@ -27,7 +27,10 @@
 
 # Establish boto3 and S3 clients at import time so Lambda can cache them for re-use.
 CARBON_BLACK = cbapi.CbResponseAPI(
-    url=os.environ['CARBON_BLACK_URL'], token=DECRYPTED_TOKEN)
+    url=os.environ['CARBON_BLACK_URL'],
+    timeout=int(os.environ['CARBON_BLACK_TIMEOUT']),
+    token=DECRYPTED_TOKEN
+)
 CLOUDWATCH = boto3.client('cloudwatch')
 S3_BUCKET = boto3.resource('s3').Bucket(os.environ['TARGET_S3_BUCKET'])
 
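One subtlety in the Lambda version: environment variables always arrive as strings, so the timeout must be cast with int() before cbapi receives it, whereas the CLI config already stores a real integer. A quick illustration:

    import os

    # Lambda delivers every environment variable as a string, even numeric ones.
    os.environ['CARBON_BLACK_TIMEOUT'] = '15'   # simulating the Lambda environment

    timeout = int(os.environ['CARBON_BLACK_TIMEOUT'])   # cast before use
    assert isinstance(timeout, int) and timeout == 15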
1 change: 1 addition & 0 deletions terraform/lambda.tf
@@ -50,6 +50,7 @@ module "binaryalert_downloader" {
 
   environment_variables = {
     CARBON_BLACK_URL                 = "${var.carbon_black_url}"
+    CARBON_BLACK_TIMEOUT             = "${var.carbon_black_timeout}"
     ENCRYPTED_CARBON_BLACK_API_TOKEN = "${var.encrypted_carbon_black_api_token}"
     TARGET_S3_BUCKET                 = "${aws_s3_bucket.binaryalert_binaries.id}"
   }
26 changes: 15 additions & 11 deletions terraform/terraform.tfvars
@@ -14,17 +14,19 @@ aws_region = "us-east-1"
 // Prefix used in all resource names (required for uniqueness). E.g. "company_team"
 name_prefix = ""
 
-
 /* ********** [Auto-Configured] Optional CarbonBlack Downloader ********** */
 enable_carbon_black_downloader = false
 
 // URL of the CarbonBlack server.
 carbon_black_url = ""
 
+// Timeout to use for the Carbon Black API client.
+// The client default is 60, so set to something lower if desired.
+carbon_black_timeout = 60
+
 // The encrypted CarbonBlack API token will automatically be generated and saved here:
 encrypted_carbon_black_api_token = ""
 
-
 /* ********** Log Retention ********** */
 // Pre-existing bucket in which to store S3 access logs. If not specified, one will be created.
 s3_log_bucket = ""
@@ -38,29 +40,26 @@ s3_log_expiration_days = 90
 // How long to retain Lambda function logs.
 lambda_log_retention_days = 14
 
-
 /* ********** Advanced Configuration ********** */
 // Tags make it easier to organize resources, view grouped billing information, etc.
 // All supported resources (CloudWatch logs, Dynamo, KMS, Lambda, S3, SQS) are tagged with
 // Name = [YOUR_VALUE_BELOW]
 tagged_name = "BinaryAlert"
 
-
 // ##### Alarms #####
 // Use an existing SNS topic for metric alarms (instead of creating one automatically).
 metric_alarm_sns_topic_arn = ""
 
 // Alarm if no binaries are analyzed for this amount of time.
 expected_analysis_frequency_minutes = 30
 
-
 // ##### Dynamo #####
 // Provisioned read/write capacity for the Dynamo table which stores match results.
 // Capacity is (very roughly) maximum number of operations per second. See Dynamo documentation.
 // Since there will likely be very few matches, these numbers can be quite low.
 dynamo_read_capacity = 10
-dynamo_write_capacity = 5
 
+dynamo_write_capacity = 5
 
 // ##### Lambda #####
 // For reference, here is a simple architectural schematic:
@@ -79,14 +78,17 @@ dynamo_write_capacity = 5
 
 // Memory, time, and concurrency limits for the analyzer function.
 lambda_analyze_memory_mb = 1024
+
 lambda_analyze_timeout_sec = 300
+
 lambda_analyze_concurrency_limit = 100
 
 // Memory, time, and concurrency limits for the downloader function.
-lambda_download_memory_mb = 128
+lambda_download_memory_mb = 256
 
 lambda_download_timeout_sec = 300
-lambda_download_concurrency_limit = 100
+
+lambda_download_concurrency_limit = 100
 
 // ##### S3 #####
 // WARNING: If force destroy is enabled, all objects in the S3 bucket(s) will be deleted during
@@ -96,26 +98,28 @@ force_destroy = true
 // If using BinaryAlert to scan existing S3 buckets, add the S3 and KMS resource ARNs here to grant
 // the appropriate permissions to the analyzer Lambda function.
 external_s3_bucket_resources = []
-external_kms_key_resources = []
 
+external_kms_key_resources = []
 
 // ##### SNS #####
 // Create a separate SNS topic which reports files that do NOT match any YARA rules.
 enable_negative_match_alerts = false
 
-
 // ##### SQS #####
 // Maximum number of messages that will be received by each invocation of the respective function.
 analyze_queue_batch_size = 10
+
 download_queue_batch_size = 1
 
 // Messages in the queue will be retained and retried for the specified duration until expiring.
 analyze_queue_retention_secs = 86400
+
 download_queue_retention_secs = 86400
 
 // During a retroactive scan, number of S3 objects to pack into a single SQS message.
 objects_per_retro_message = 4
 
 // If an SQS message is not deleted (successfully processed) after the max number of receive
 // attempts, the message is delivered to the SQS dead-letter queue.
-download_queue_max_receives = 7
+// Retries are common due to race conditions with binaries landing on the Carbon Black server
+download_queue_max_receives = 100
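The interplay between the API timeout and the raised receive limit can be sanity-checked with rough arithmetic. A back-of-the-envelope sketch (assumes each failed receive costs at most one API timeout; actual pacing also depends on the SQS visibility timeout, which this file does not set):

    # Hypothetical worst-case budget for a single stubborn download message.
    carbon_black_timeout = 60               # seconds per API attempt (value above)
    download_queue_max_receives = 100       # receive attempts before the dead-letter queue
    download_queue_retention_secs = 86400   # 24-hour message lifetime

    worst_case_api_seconds = carbon_black_timeout * download_queue_max_receives
    print(worst_case_api_seconds)                                  # 6000
    print(worst_case_api_seconds < download_queue_retention_secs)  # True: retries fit in retention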
1 change: 1 addition & 0 deletions tests/lambda_functions/downloader/main_test.py
@@ -35,6 +35,7 @@ class MainTest(fake_filesystem_unittest.TestCase):
     def setUp(self):
         """Mock out CarbonBlack and boto3 before importing the module."""
         os.environ['CARBON_BLACK_URL'] = 'test-carbon-black-url'
+        os.environ['CARBON_BLACK_TIMEOUT'] = '15'
         os.environ['ENCRYPTED_CARBON_BLACK_API_TOKEN'] = base64.b64encode(
             b'super-secret').decode('ascii')
         os.environ['TARGET_S3_BUCKET'] = 'test-s3-bucket'
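The reason setUp seeds the environment before importing: main.py constructs its CbResponseAPI client at import time, so the variables (and mocks) must exist first, or the import raises a KeyError on CARBON_BLACK_TIMEOUT. A simplified sketch of that ordering constraint (the real test also mocks boto3 and KMS decryption, omitted here; assumes cbapi is importable):

    import os
    from unittest import mock

    # 1. Environment first: the module reads these the moment it is imported.
    os.environ['CARBON_BLACK_URL'] = 'test-carbon-black-url'
    os.environ['CARBON_BLACK_TIMEOUT'] = '15'

    # 2. Stub the client class so import-time construction hits the mock.
    with mock.patch('cbapi.CbResponseAPI') as mock_api:
        # 3. Only now is it safe to import the module under test.
        # (The real test also mocks boto3 before this import.)
        from lambda_functions.downloader import main  # noqa: F401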
