diff --git a/CHANGELOG.md b/CHANGELOG.md index 3afdf6a4e..efb27c9e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,22 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). +## 1.16.0 - 2021-03-31 + +### Fixed +- Removed the RabbitMQ plugin from the Docker version of Clowder + +### Added +- Added `sort` and `order` parameters to the `/api/search` endpoint that support date and numeric field sorting. + If only order is specified, the created date is used. String fields are not currently supported. +- Added a new `/api/deleteindex` admin endpoint that will queue an action to delete an Elasticsearch index (usually prior to a reindex). +- Added a JMeter testing suite. + +### Changed +- Consolidated field names sent by the EventSinkService to maximize reuse. +- Added a status column to the files report to indicate whether files are ARCHIVED, etc. +- Reworked auto-archival configuration options to make their meanings clearer. + ## 1.15.1 - 2021-03-12 ### Fixed diff --git a/app/Global.scala b/app/Global.scala index 24adc32b8..f28527631 100644 --- a/app/Global.scala +++ b/app/Global.scala @@ -64,21 +64,18 @@ object Global extends WithFilters(new GzipFilter(), new Jsonp(), CORSFilter()) w val archiveEnabled = Play.application.configuration.getBoolean("archiveEnabled", false) if (archiveEnabled && archivalTimer == null) { - val archiveDebug = Play.application.configuration.getBoolean("archiveDebug", false) - val interval = if (archiveDebug) { 5 minutes } else { 1 day } - - // Determine time until next midnight - val now = ZonedDateTime.now - val midnight = now.truncatedTo(ChronoUnit.DAYS) - val sinceLastMidnight = Duration.between(midnight, now).getSeconds - val delay = if (archiveDebug) { 10 seconds } else { - (Duration.ofDays(1).getSeconds - sinceLastMidnight) seconds - } - - Logger.info("Starting archival loop - first iteration in " + delay + ", next iteration after " + interval) - archivalTimer = Akka.system.scheduler.schedule(delay, interval) { - Logger.info("Starting auto archive process...") - files.autoArchiveCandidateFiles() + // Set archiveAutoInterval == 0 to disable auto archiving + val archiveAutoInterval = Play.application.configuration.getLong("archiveAutoInterval", 0) + if (archiveAutoInterval > 0) { + val interval = FiniteDuration(archiveAutoInterval, SECONDS) + val archiveAutoDelay = Play.application.configuration.getLong("archiveAutoDelay", 0) + val delay = FiniteDuration(archiveAutoDelay, SECONDS) + + Logger.info("Starting archival loop - first iteration in " + delay + ", next iteration after " + interval) + archivalTimer = Akka.system.scheduler.schedule(delay, interval) { + Logger.info("Starting auto archive process...") + files.autoArchiveCandidateFiles() + } } } diff --git a/app/api/Admin.scala b/app/api/Admin.scala index ae5f8a857..d8c878df0 100644 --- a/app/api/Admin.scala +++ b/app/api/Admin.scala @@ -180,4 +180,10 @@ class Admin @Inject() (userService: UserService, if (success) Ok(toJson(Map("status" -> "reindex successfully queued"))) else BadRequest(toJson(Map("status" -> "reindex queuing failed, Elasticsearch may be disabled"))) } + + def deleteIndex = ServerAdminAction { implicit request => + val success = esqueue.queue("delete_index") + if (success) Ok(toJson(Map("status" -> "deindex successfully queued"))) + else BadRequest(toJson(Map("status" -> "deindex queuing failed, Elasticsearch may be disabled"))) + } } diff --git
a/app/api/Reporting.scala b/app/api/Reporting.scala index 674f0d45c..c88c8e412 100644 --- a/app/api/Reporting.scala +++ b/app/api/Reporting.scala @@ -39,7 +39,7 @@ class Reporting @Inject()(selections: SelectionService, var headerRow = true val enum = Enumerator.generateM({ val chunk = if (headerRow) { - val header = "type,id,name,owner,owner_id,size_kb,uploaded,views,downloads,last_viewed,last_downloaded,location,parent_datasets,parent_collections,parent_spaces\n" + val header = "type,id,name,owner,owner_id,size_kb,uploaded,views,downloads,last_viewed,last_downloaded,location,parent_datasets,parent_collections,parent_spaces,status\n" headerRow = false Some(header.getBytes("UTF-8")) } else { @@ -137,7 +137,7 @@ class Reporting @Inject()(selections: SelectionService, // TODO: This will still fail on excessively large instances without Enumerator refactor - should we maintain this endpoint or remove? - var contents: String = "type,id,name,owner,owner_id,size_kb,uploaded/created,views,downloads,last_viewed,last_downloaded,location,parent_datasets,parent_collections,parent_spaces\n" + var contents: String = "type,id,name,owner,owner_id,size_kb,uploaded/created,views,downloads,last_viewed,last_downloaded,location,parent_datasets,parent_collections,parent_spaces,status\n" collections.getMetrics().foreach(coll => { contents += _buildCollectionRow(coll, true) @@ -288,7 +288,8 @@ class Reporting @Inject()(selections: SelectionService, contents += "\""+f.loader_id+"\"," contents += "\""+ds_list+"\"," contents += "\""+coll_list+"\"," - contents += "\""+space_list+"\"" + contents += "\""+space_list+"\"," + contents += "\""+f.status+"\"" contents += "\n" return contents @@ -343,6 +344,7 @@ class Reporting @Inject()(selections: SelectionService, if (returnAllColums) contents += "," // datasets do not have parent_datasets contents += "\""+coll_list+"\"," contents += "\""+space_list+"\"" + if (returnAllColums) contents += "," // datasets do not have status contents += "\n" return contents @@ -391,6 +393,7 @@ class Reporting @Inject()(selections: SelectionService, if (returnAllColums) contents += "," // collections do not have parent_datasets contents += "\""+coll_list+"\"," contents += "\""+space_list+"\"" + if (returnAllColums) contents += "," // collections do not have status contents += "\n" return contents diff --git a/app/api/Search.scala b/app/api/Search.scala index 405e065aa..603736ae4 100644 --- a/app/api/Search.scala +++ b/app/api/Search.scala @@ -22,7 +22,7 @@ class Search @Inject() ( /** Search using a simple text string with filters */ def search(query: String, resource_type: Option[String], datasetid: Option[String], collectionid: Option[String], spaceid: Option[String], folderid: Option[String], field: Option[String], tag: Option[String], - from: Option[Int], size: Option[Int], page: Option[Int]) = PermissionAction(Permission.ViewDataset) { implicit request => + from: Option[Int], size: Option[Int], page: Option[Int], sort: Option[String], order: Option[String]) = PermissionAction(Permission.ViewDataset) { implicit request => current.plugin[ElasticsearchPlugin] match { case Some(plugin) => { // If from is specified, use it. Otherwise use page * size of page if possible, otherwise use 0. 
@@ -42,7 +42,9 @@ class Search @Inject() ( (spaceid match {case Some(x) => s"&spaceid=$x" case None => ""}) + (folderid match {case Some(x) => s"&folderid=$x" case None => ""}) + (field match {case Some(x) => s"&field=$x" case None => ""}) + - (tag match {case Some(x) => s"&tag=$x" case None => ""}) + (tag match {case Some(x) => s"&tag=$x" case None => ""}) + + (sort match {case Some(x) => s"&sort=$x" case None => ""}) + + (order match {case Some(x) => s"&order=$x" case None => ""}) // Add space filter to search here as a simple permissions check val superAdmin = request.user match { @@ -54,7 +56,7 @@ class Search @Inject() ( else spaces.listAccess(0, Set[Permission](Permission.ViewSpace), request.user, true, true, false, false).map(sp => sp.id) - val response = plugin.search(query, resource_type, datasetid, collectionid, spaceid, folderid, field, tag, from_index, size, permitted, request.user) + val response = plugin.search(query, resource_type, datasetid, collectionid, spaceid, folderid, field, tag, from_index, size, sort, order, permitted, request.user) val result = SearchUtils.prepareSearchResponse(response, source_url, request.user) Ok(toJson(result)) } diff --git a/app/services/ElasticsearchPlugin.scala b/app/services/ElasticsearchPlugin.scala index c3b78d855..25b7c8d9d 100644 --- a/app/services/ElasticsearchPlugin.scala +++ b/app/services/ElasticsearchPlugin.scala @@ -29,6 +29,7 @@ import play.api.libs.json._ import _root_.util.SearchUtils import org.apache.commons.lang.StringUtils import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest +import org.elasticsearch.search.sort.SortOrder /** @@ -130,7 +131,8 @@ class ElasticsearchPlugin(application: Application) extends Plugin { * "field_leaf_key": name of immediate field only, e.g. 
'lines' */ val queryObj = prepareElasticJsonQuery(query, grouping, permitted, user) - accumulatePageResult(queryObj, user, from.getOrElse(0), size.getOrElse(maxResults)) + // TODO: Support sorting in GUI search + accumulatePageResult(queryObj, user, from.getOrElse(0), size.getOrElse(maxResults), None, None) } /** @@ -152,8 +154,8 @@ class ElasticsearchPlugin(application: Application) extends Plugin { */ def search(query: String, resource_type: Option[String], datasetid: Option[String], collectionid: Option[String], spaceid: Option[String], folderid: Option[String], field: Option[String], tag: Option[String], - from: Option[Int], size: Option[Int], permitted: List[UUID], user: Option[User], - index: String = nameOfIndex): ElasticsearchResult = { + from: Option[Int], size: Option[Int], sort: Option[String], order: Option[String], permitted: List[UUID], + user: Option[User], index: String = nameOfIndex): ElasticsearchResult = { // Convert any parameters from API into the query syntax equivalent so we can parse it all together later var expanded_query = query @@ -166,16 +168,16 @@ class ElasticsearchPlugin(application: Application) extends Plugin { folderid.foreach(fid => expanded_query += s" in:$fid") val queryObj = prepareElasticJsonQuery(expanded_query.stripPrefix(" "), permitted, user) - accumulatePageResult(queryObj, user, from.getOrElse(0), size.getOrElse(maxResults)) + accumulatePageResult(queryObj, user, from.getOrElse(0), size.getOrElse(maxResults), sort, order) } /** Perform search, check permissions, and keep searching again if page isn't filled with permitted resources */ def accumulatePageResult(queryObj: XContentBuilder, user: Option[User], from: Int, size: Int, - index: String = nameOfIndex): ElasticsearchResult = { + sort: Option[String], order: Option[String], index: String = nameOfIndex): ElasticsearchResult = { var total_results = ListBuffer.empty[ResourceRef] // Fetch initial page & filter by permissions - val (results, total_size) = _search(queryObj, index, Some(from), Some(size)) + val (results, total_size) = _search(queryObj, index, Some(from), Some(size), sort, order) Logger.debug(s"Found ${results.length} results with ${total_size} total") val filtered = checkResultPermissions(results, user) Logger.debug(s"Permission to see ${filtered.length} results") @@ -187,7 +189,7 @@ class ElasticsearchPlugin(application: Application) extends Plugin { var exhausted = false while (total_results.length < size && !exhausted) { Logger.debug(s"Only have ${total_results.length} total results; searching for ${size*2} more from ${new_from}") - val (results, total_size) = _search(queryObj, index, Some(new_from), Some(size*2)) + val (results, total_size) = _search(queryObj, index, Some(new_from), Some(size*2), sort, order) Logger.debug(s"Found ${results.length} results with ${total_size} total") if (results.length == 0 || new_from+results.length == total_size) exhausted = true // No more results to find val filtered = checkResultPermissions(results, user) @@ -251,17 +253,39 @@ class ElasticsearchPlugin(application: Application) extends Plugin { /*** Execute query and return list of results and total result count as tuple */ def _search(queryObj: XContentBuilder, index: String = nameOfIndex, - from: Option[Int] = Some(0), size: Option[Int] = Some(maxResults)): (List[ResourceRef], Long) = { + from: Option[Int] = Some(0), size: Option[Int] = Some(maxResults), + sort: Option[String], order: Option[String]): (List[ResourceRef], Long) = { connect() val response = client match { case Some(x) => { 
- Logger.info("Searching Elasticsearch: "+queryObj.string()) + Logger.debug("Searching Elasticsearch: " + queryObj.string()) + + // Exclude _sort fields in response object + var sortFilter = jsonBuilder().startObject().startArray("exclude").value("*._sort").endArray().endObject() + var responsePrep = x.prepareSearch(index) .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setSource(sortFilter) .setQuery(queryObj) responsePrep = responsePrep.setFrom(from.getOrElse(0)) responsePrep = responsePrep.setSize(size.getOrElse(maxResults)) + // Default to ascending if no order provided but a field is + val searchOrder = order match { + case Some("asc") => SortOrder.ASC + case Some("desc") => SortOrder.DESC + case Some("DESC") => SortOrder.DESC + case _ => SortOrder.ASC + } + // Default to created field if order is provided but no field is + sort match { + // case Some("name") => responsePrep = responsePrep.addSort("name._sort", searchOrder) TODO: Not yet supported + case Some(x) => responsePrep = responsePrep.addSort(x, searchOrder) + case None => order match { + case Some(o) => responsePrep = responsePrep.addSort("created", searchOrder) + case None => {} + } + } val response = responsePrep.setExplain(true).execute().actionGet() Logger.debug("Search hits: " + response.getHits().getTotalHits()) @@ -291,8 +315,7 @@ class ElasticsearchPlugin(application: Application) extends Plugin { .field("type", "custom") .field("tokenizer", "uax_url_email") .endObject() - .endObject() - .endObject() + .endObject().endObject() .startObject("index") .startObject("mapping") .field("ignore_malformed", true) @@ -697,10 +720,14 @@ class ElasticsearchPlugin(application: Application) extends Plugin { * as strings for datatypes besides Objects. In the future, this could * be removed, but only once the Search API better supports those data types (e.g. Date). 
*/ + + // TODO: With Elastic 6.8+ we can use "normalizer": "case_insensitive" for _sort fields + """{"clowder_object": { |"numeric_detection": true, |"properties": { - |"name": {"type": "string"}, + |"name": {"type": "string", "fields": { + | "_sort": {"type":"string", "index": "not_analyzed"}}}, |"description": {"type": "string"}, |"resource_type": {"type": "string", "include_in_all": false}, |"child_of": {"type": "string", "include_in_all": false}, @@ -925,7 +952,7 @@ class ElasticsearchPlugin(application: Application) extends Plugin { } } - // If a term is specified that isn't in this list, it's assumed to be a metadata field + // If a term is specified that isn't in this list, it's assumed to be a metadata field (for sorting and filtering) val official_terms = List("name", "creator", "created", "email", "resource_type", "in", "contains", "tag", "exists", "missing") // Create list of (key, operator, value) for passing to builder diff --git a/app/services/mongodb/ElasticsearchQueue.scala b/app/services/mongodb/ElasticsearchQueue.scala index a27bb277e..50fa9e7ab 100644 --- a/app/services/mongodb/ElasticsearchQueue.scala +++ b/app/services/mongodb/ElasticsearchQueue.scala @@ -53,6 +53,7 @@ class ElasticsearchQueue @Inject() ( } } case "index_all" => _indexAll() + case "delete_index" => _deleteIndex() case "index_swap" => _swapIndex() case _ => throw new IllegalArgumentException(s"Unrecognized action: ${action.action}") } @@ -63,6 +64,7 @@ class ElasticsearchQueue @Inject() ( case "index_dataset" => throw new IllegalArgumentException(s"No target specified for action ${action.action}") case "index_collection" => throw new IllegalArgumentException(s"No target specified for action ${action.action}") case "index_all" => _indexAll() + case "delete_index" => _deleteIndex() case "index_swap" => _swapIndex() case _ => throw new IllegalArgumentException(s"Unrecognized action: ${action.action}") } @@ -97,6 +99,12 @@ class ElasticsearchQueue @Inject() ( }) } + def _deleteIndex() = { + current.plugin[ElasticsearchPlugin].foreach(p => { + p.deleteAll() + }) + } + // Replace the main index with the newly reindexed temp file def _swapIndex() = { Logger.debug("Swapping temporary reindex for main index") diff --git a/app/services/mongodb/MongoDBFileService.scala b/app/services/mongodb/MongoDBFileService.scala index 9a9e87fe9..74b7cc539 100644 --- a/app/services/mongodb/MongoDBFileService.scala +++ b/app/services/mongodb/MongoDBFileService.scala @@ -4,23 +4,23 @@ import play.api.mvc.Request import services._ import models._ import com.mongodb.casbah.commons.{Imports, MongoDBObject} -import java.text.SimpleDateFormat +import java.text.SimpleDateFormat import _root_.util.{License, Parsers, SearchUtils} import scala.collection.mutable.ListBuffer import Transformation.LidoToCidocConvertion -import java.util.{ArrayList, Calendar} -import java.io._ +import java.util.{ArrayList, Calendar, Date} +import java.io._ import org.apache.commons.io.FileUtils import org.json.JSONObject import play.api.libs.json.{JsValue, Json} import com.mongodb.util.JSON + import java.nio.file.{FileSystems, Files} import java.nio.file.attribute.BasicFileAttributes -import java.time.LocalDateTime - +import java.time.Instant import collection.JavaConverters._ import scala.collection.JavaConversions._ import javax.inject.{Inject, Singleton} @@ -31,8 +31,6 @@ import scala.util.parsing.json.JSONArray import play.api.libs.json.JsArray import models.File import play.api.libs.json.JsObject -import java.util.Date - import 
com.novus.salat.dao.{ModelCompanion, SalatDAO} import MongoContext.context import play.api.Play._ @@ -40,6 +38,9 @@ import com.mongodb.casbah.Imports._ import models.FileStatus.FileStatus import org.bson.types.ObjectId +import java.time.temporal.ChronoUnit +import scala.concurrent.duration.FiniteDuration + /** * Use mongo for both metadata and blobs. @@ -201,48 +202,41 @@ class MongoDBFileService @Inject() ( * This may be expanded to support per-space configuration in the future. * * Reads the following parameters from Clowder configuration: - * - archiveAutoAfterDaysInactive - timeout after which files are considered + * - archiveAutoAfterInactiveCount - timeout after which files are considered * to be candidates for archival (see below) - * - archiveMinimumStorageSize - files below this size (in Bytes) should not be archived + * - archiveAutoAfterInactiveUnits - time unit that should be used for the timeout (see below) + * - archiveAutoAboveMinimumStorageSize - files below this size (in Bytes) should not be archived * - clowder.rabbitmq.clowderurl - the Clowder hostname to pass to the archival extractor * - commKey - the admin key to pass to the archival extractor * * Archival candidates are currently defined as follows: - * - file must be over `archiveMinimumStorageSize` Bytes in size - * - file must be over `archiveAutoAfterDaysInactive` days old + * - file's size must be greater than `archiveAutoAboveMinimumStorageSize` Bytes + * - file's age must be greater than `archiveAutoAfterInactiveCount` * `archiveAutoAfterInactiveUnits` + * (e.g. 10 days old) * - AND one of the following must be true: * - file has never been downloaded (0 downloads) * OR - * - file has not been downloaded in the past `archiveAutoAfterDaysInactive` days + * - file has not been downloaded in the past `archiveAutoAfterInactiveCount` `archiveAutoAfterInactiveUnits` * * */ def autoArchiveCandidateFiles() = { - val timeout = configuration(play.api.Play.current).getInt("archiveAutoAfterDaysInactive") + val timeout: Option[Long] = configuration(play.api.Play.current).getLong("archiveAutoAfterInactiveCount") timeout match { case None => Logger.info("No archival auto inactivity timeout set - skipping auto archival loop.") - case Some(days) => { - if (days == 0) { + case Some(inactiveTimeout) => { + if (inactiveTimeout == 0) { Logger.info("Archival auto inactivity timeout set to 0 - skipping auto archival loop.") } else { - // DEBUG ONLY: query for files that were uploaded within the past hour - val archiveDebug = configuration(play.api.Play.current).getBoolean("archiveDebug").getOrElse(false) - val oneHourAgo = LocalDateTime.now.minusHours(1).toString + "-00:00" - - // Query for files that haven't been accessed for at least this many days - val daysAgo = LocalDateTime.now.minusDays(days).toString + "-00:00" - val notDownloadedWithinTimeout = if (archiveDebug) { - ("stats.last_downloaded" $gte Parsers.fromISO8601(oneHourAgo)) ++ ("status" $eq FileStatus.PROCESSED.toString) - } else { - ("stats.last_downloaded" $lt Parsers.fromISO8601(daysAgo)) ++ ("status" $eq FileStatus.PROCESSED.toString) - } + val unit = configuration(play.api.Play.current).getString("archiveAutoAfterInactiveUnits").getOrElse("days") + val timeoutAgo = FiniteDuration(inactiveTimeout, unit) + + // Query for files that haven't been accessed for at least this many units + val since = Instant.now().minus(timeoutAgo.length.toLong, ChronoUnit.valueOf(timeoutAgo.unit.toString)).toString + "-00:00" + val notDownloadedWithinTimeout = ("stats.last_downloaded" 
$lt Parsers.fromISO8601(since)) ++ ("status" $eq FileStatus.PROCESSED.toString) // Include files that have never been downloaded, but make sure they are old enough - val neverDownloaded = if (archiveDebug) { - ("stats.downloads" $eq 0) ++ ("uploadDate" $gte Parsers.fromISO8601(oneHourAgo)) ++ ("status" $eq FileStatus.PROCESSED.toString) - } else { - ("stats.downloads" $eq 0) ++ ("uploadDate" $lt Parsers.fromISO8601(daysAgo)) ++ ("status" $eq FileStatus.PROCESSED.toString) - } + val neverDownloaded = ("stats.downloads" $eq 0) ++ ("uploadDate" $lt Parsers.fromISO8601(since)) ++ ("status" $eq FileStatus.PROCESSED.toString) // TODO: How to get host / apiKey / admin internally without a request? val host = configuration(play.api.Play.current).getString("clowder.rabbitmq.clowderurl").getOrElse("http://localhost:9000") @@ -257,7 +251,7 @@ class MongoDBFileService @Inject() ( Logger.info("Archival candidates found: " + matchingFiles.length) // Exclude candidates that do not exceed our minimum file size threshold - val minSize = configuration(play.api.Play.current).getLong("archiveMinimumStorageSize").getOrElse(1000000L) + val minSize = configuration(play.api.Play.current).getLong("archiveAutoAboveMinimumStorageSize").getOrElse(1000000L) // Loop all candidate files and submit each one for archival for (file <- matchingFiles) { diff --git a/conf/application.conf b/conf/application.conf index fcee9659b..898da5fe9 100644 --- a/conf/application.conf +++ b/conf/application.conf @@ -587,18 +587,20 @@ enableUsernamePassword = true # "archive" and "unarchive" should be purely inverse operations, such # that unarchive(archive(x)) == x for any valid input. # -# Available archival extractors: -# - ncsa.archival.disk - https://opensource.ncsa.illinois.edu/bitbucket/projects/CATS/repos/extractors-archival-disk/browse -# - ncsa.archival.s3 - https://opensource.ncsa.illinois.edu/bitbucket/projects/CATS/repos/extractors-archival-s3/browse +# See https://github.com/clowder-framework/extractors-archival for available extractors # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ archiveEnabled=false -archiveDebug=false +archiveAllowUnarchive=false #archiveExtractorId="ncsa.archival.s3" archiveExtractorId="ncsa.archival.disk" -archiveAllowUnarchive=false -archiveAutoAfterDaysInactive=90 -archiveMinimumStorageSize=1000000 + +# NOTE: Setting interval to zero will disable automatic archiving archiveAutoInterval=0 # in seconds (e.g. 86400 == 24 hours) archiveAutoDelay=120 # in seconds (delay before the first archival run) archiveAutoAfterInactiveCount=90 # NOTE: Setting count to zero will disable automatic archiving archiveAutoAfterInactiveUnits="days" archiveAutoAboveMinimumStorageSize=1000000 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Configuration file for securesocial diff --git a/conf/routes b/conf/routes index 64a506b4b..d7822c627 100644 --- a/conf/routes +++ b/conf/routes @@ -310,6 +310,7 @@ POST /api/admin/users POST /api/sensors/config @api.Admin.sensorsConfig POST /api/changeAppearance @api.Admin.submitAppearance POST /api/reindex @api.Admin.reindex +POST /api/deleteindex @api.Admin.deleteIndex POST /api/admin/configuration @api.Admin.updateConfiguration #---------------------------------------------------------------------- @@ -663,7 +664,7 @@ DELETE /api/sections/:id # ---------------------------------------------------------------------- GET /api/search/json @api.Search.searchJson(query: String ?= "", grouping: String ?= "AND", from: Option[Int], size: Option[Int]) GET /api/search/multimediasearch @api.Search.searchMultimediaIndex(section_id: UUID) -GET /api/search @api.Search.search(query: String ?= "", resource_type: Option[String], datasetid: Option[String], collectionid: Option[String], spaceid: Option[String], folderid: Option[String], field: Option[String], tag: Option[String], from: Option[Int], size: Option[Int], page: Option[Int]) +GET /api/search @api.Search.search(query: String ?= "", resource_type: Option[String], datasetid: Option[String], collectionid: Option[String], spaceid: Option[String], folderid: Option[String], field: Option[String], tag: Option[String], from: Option[Int], size: Option[Int], page: Option[Int], sort: Option[String], order: Option[String]) # ---------------------------------------------------------------------- # GEOSTREAMS ENDPOINT diff --git a/doc/src/sphinx/conf.py b/doc/src/sphinx/conf.py index 24c97f9e2..1c0857138 100644 --- a/doc/src/sphinx/conf.py +++ b/doc/src/sphinx/conf.py @@ -22,7 +22,7 @@ author = 'Luigi Marini' # The full version, including alpha/beta/rc tags -release = '1.15.1' +release = '1.16.0' # -- General configuration --------------------------------------------------- diff --git a/docker/play.plugins b/docker/play.plugins index 8d3776c1d..0e8c40d9f 100644 --- a/docker/play.plugins +++ b/docker/play.plugins @@ -1,2 +1 @@ -9992:services.RabbitmqPlugin 10002:services.ElasticsearchPlugin diff --git a/project/Build.scala b/project/Build.scala index 79b39ed88..2c51c2ff7 100644 --- a/project/Build.scala +++ b/project/Build.scala @@ -13,7 +13,7 @@ import NativePackagerKeys._ object ApplicationBuild extends Build { val appName = "clowder" - val version = "1.15.1" + val version = "1.16.0" val jvm = "1.7" def appVersion: String = { diff --git a/public/swagger.yml b/public/swagger.yml index 5b0545f4e..b110554ec 100644 --- a/public/swagger.yml +++ b/public/swagger.yml @@ -9,7 +9,7 @@ info: Clowder is a customizable and scalable data management system to support any data format and multiple research domains. It is under active development and deployed for a variety of research projects. - version: 1.15.1 + version: 1.16.0 termsOfService: https://clowder.ncsa.illinois.edu/clowder/tos contact: name: Clowder @@ -150,6 +150,18 @@ paths: assuming "size" items per page. schema: type: integer + - name: sort + in: query + description: A date or numeric field to sort by. If order is given but no field is specified, the created date is used.
+ schema: + type: string + - name: order + in: query + description: Whether to sort in asc (ascending) or desc (descending) order. If a field is given without an order, asc is used. + schema: + type: string + enum: [asc, desc] + default: asc responses: 200: description: OK diff --git a/scripts/jmeter/README.md b/scripts/jmeter/README.md new file mode 100644 index 000000000..b23a873c8 --- /dev/null +++ b/scripts/jmeter/README.md @@ -0,0 +1,25 @@ +# JMeter Clowder Tests + +This directory includes a simple [JMeter](https://jmeter.apache.org/) test plan to exercise the service and API. +JMeter includes both a GUI to create and run the test plans as well as a CLI. When stress testing, use the CLI. + +Before running the test you will need to set `X-API-KEY` to your own Clowder API key. Change the web server protocol, +server name, and port in HTTP Request Defaults, and set the path of the file you want to upload (`File.path`) in `Upload file`. + +You can set the concurrency by changing the number of threads `ThreadGroup.num_threads` in Scenario 1. +This scenario includes the following steps: +- Create dataset +- Upload file to dataset +- Create folder +- Move file to folder +- Update file name +- Upload file metadata + +There is a 1s pause between each call. Make it shorter or disable it when stress testing. + +To run the test from the command line, use the following command: + +`jmeter -n -t jmeter-clowder.jmx -l jmeter-out -e -o jmeter-out-html` + +The file `jmeter-out` will include the status of each call. +The `jmeter-out-html` directory will include an HTML page with summaries and visualizations of that output. \ No newline at end of file diff --git a/scripts/jmeter/jmeter-clowder.jmx b/scripts/jmeter/jmeter-clowder.jmx new file mode 100644 index 000000000..53e16c6cb --- /dev/null +++ b/scripts/jmeter/jmeter-clowder.jmx @@ -0,0 +1,447 @@ + + + + + + false + false + + + + + + + + continue + + false + -1 + + 5 + 5 + 1373789594000 + 1373789594000 + true + 10 + 5 + Virtual Users Running Scenario 1.
+Make test last 1 minute (see Scheduler) + true + + + + + + + localhost + 9000 + http + + /clowder + Notice Timeouts: +Read to 30s +Connect to 5s + 4 + 5000 + 30000 + + + + + false + false + + + + + + User-Agent + Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:48.0) Gecko/20100101 Firefox/48.0 + + + Accept + text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8 + + + Accept-Language + fr,en-US;q=0.7,en;q=0.3 + + + Accept-Encoding + gzip, deflate + + + X-API-KEY + replace-with-your-api-key + + + + + + true + + + + false + { + "name": "Testing Uploads", + "description": "Created by JMeter script.", + "space": [] +} + = + + + + + + + + /clowder/api/datasets/createempty + POST + true + false + true + false + + + + + + + datasetId + $.id + + NOT_FOUND + + + + + + Content-Type + application/json + + + + + + + 1 + 0 + 0 + + + + 1000 + 100.0 + + + + + + + + /Users/lmarini/data/clowder-demo-files/9xLwvaT.jpg + file + image/jpg + + + + + + + + + + + /clowder/api/uploadToDataset/${datasetId} + POST + true + false + true + true + + + + + + + fileId + $.id + + NOT_FOUND + + + + + 1 + 0 + 0 + + + + 1000 + 100.0 + + + + + true + + + + false + { + "name": "folder", + "parentId": "${datasetId}", + "parentType": "dataset" +} + = + + + + + + + + /clowder/api/datasets/${datasetId}/newFolder + POST + true + false + true + false + + + + + + + + + Content-Type + application/json + + + + + + folderId + $.id + + NOT_FOUND + + + + + 1 + 0 + 0 + + + + 1000 + 100.0 + + + + + true + + + + false + { +} + = + + + + + + + + /clowder/api/datasets/${datasetId}/moveFile/${folderId}/${fileId} + POST + true + false + true + false + + + + + + + + + Content-Type + application/json + + + + + + + 1 + 0 + 0 + + + + 1000 + 100.0 + + + + + true + + + + false + { + "name":"newFolderName" +} + = + + + + + + + + /clowder/api/datasets/${datasetId}/updateName/${folderId} + PUT + true + false + true + false + + + + + + + + + Content-Type + application/json + + + + + + + 1 + 0 + 0 + + + + 1000 + 100.0 + + + + + true + + + + false + { + "@context":[ + "https://clowder.ncsa.illinois.edu/contexts/metadata.jsonld", + { + "Abstract": "http://purl.org/dc/terms/abstract" + } + ], + "agent": { + "@type":"cat:extractor", + "name":"Jmeter", + "extractor_id":"https://clowder.ncsa.illinois.edu/api/extractors/jmeter" + }, + "content": { + "Abstract": "Testing API calls using Jmeter." + } +} + = + + + + + + + + clowder/api/datasets/${datasetId}/metadata.jsonld + POST + true + false + true + false + + + + + + + + + Content-Type + application/json + + + + + + + 1 + 0 + 0 + + + + 1000 + 100.0 + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + false + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + + + For scripting only + + + + + +
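Usage sketches for the changes above (illustrative only, not part of the diff):

The new `sort` and `order` query parameters are exposed on `GET /api/search`. A minimal sketch of calling the endpoint with Play's WS client; the host, API key, and query string here are placeholder assumptions:

```scala
import play.api.Play.current
import play.api.libs.ws.WS

// Hypothetical values - replace with your own instance URL and API key
val clowderUrl = "http://localhost:9000/clowder"
val apiKey = "replace-with-your-api-key"

// Ask for the newest matching resources first: sort on the "created" date
// field in descending order. Omitting "sort" while passing "order" also
// sorts on "created"; omitting "order" defaults to ascending.
val futureResponse = WS.url(clowderUrl + "/api/search")
  .withHeaders("X-API-KEY" -> apiKey)
  .withQueryString("query" -> "temperature", "sort" -> "created", "order" -> "desc")
  .get()
```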
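The defaulting rules the diff adds to `ElasticsearchPlugin._search` can be stated compactly. The `resolveSort` helper below is hypothetical (not in the codebase) and simply restates the logic from the diff:

```scala
import org.elasticsearch.search.sort.SortOrder

// Mirrors the sort/order defaulting added in ElasticsearchPlugin._search:
// - field given without an order -> sort that field ascending
// - order given without a field  -> sort on "created"
// - neither given                -> no sort is applied
def resolveSort(sort: Option[String], order: Option[String]): Option[(String, SortOrder)] = {
  val searchOrder = order match {
    case Some("desc") | Some("DESC") => SortOrder.DESC
    case _ => SortOrder.ASC
  }
  sort match {
    case Some(field) => Some((field, searchOrder))
    case None => order.map(_ => ("created", searchOrder))
  }
}
```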
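The new `/api/deleteindex` route queues a `delete_index` action, typically followed by a reindex. A sketch of that sequence over HTTP, assuming the endpoint accepts a server admin's API key (both routes require `ServerAdminAction`; URL and key are placeholders):

```scala
import play.api.Play.current
import play.api.libs.ws.WS
import scala.concurrent.ExecutionContext.Implicits.global

// Hypothetical values - replace with a server admin's API key and your instance URL
val clowderUrl = "http://localhost:9000/clowder"
val adminKey = "replace-with-a-server-admin-api-key"

// Queue deletion of the Elasticsearch index first, then queue a full
// reindex once the delete request has been accepted, so the queue
// processes the two actions in order.
val rebuilt = WS.url(clowderUrl + "/api/deleteindex")
  .withHeaders("X-API-KEY" -> adminKey)
  .post("")
  .flatMap { _ =>
    WS.url(clowderUrl + "/api/reindex")
      .withHeaders("X-API-KEY" -> adminKey)
      .post("")
  }
```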
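The reworked archival options split the old `archiveAutoAfterDaysInactive` into a count plus a unit. A sketch of how `autoArchiveCandidateFiles` derives the inactivity cutoff from those two settings, using the default values from `application.conf`:

```scala
import java.time.Instant
import java.time.temporal.ChronoUnit
import scala.concurrent.duration.FiniteDuration

// archiveAutoAfterInactiveCount=90, archiveAutoAfterInactiveUnits="days"
val inactiveTimeout = 90L
val unit = "days"

// FiniteDuration parses the unit string; its TimeUnit name ("DAYS")
// matches the ChronoUnit name, which the diff relies on.
val timeoutAgo = FiniteDuration(inactiveTimeout, unit)
val since = Instant.now().minus(timeoutAgo.length, ChronoUnit.valueOf(timeoutAgo.unit.toString))
// Files whose last download (or upload, if never downloaded) precedes
// `since` are archival candidates, subject to the minimum size threshold.
```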