Merge branch 'release/v0.2.8'
vogt31337 committed Sep 20, 2023
2 parents df91bbe + f4bfed5 commit 9424832
Showing 5 changed files with 23 additions and 14 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/github_test_action.yml
@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ['3.9', '3.10']
+        python-version: ['3.9', '3.10', '3.11']
 
     services:
       mongodb:
@@ -27,7 +27,7 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          python -m pip install pytest
+          python -m pip install -U pytest
           pip install -r requirements.txt
           pip install -r requirements_dev.txt
           pip install .["all"]
5 changes: 2 additions & 3 deletions .github/workflows/release.yml
@@ -76,7 +76,7 @@ jobs:
     needs: upload
     strategy:
       matrix:
-        python-version: ['3.8', '3.9', '3.10', '3.11']
+        python-version: ['3.9', '3.10', '3.11']
         os: [ ubuntu-latest, windows-latest ]
     steps:
       - name: Set up Python ${{ matrix.python-version }}
@@ -86,8 +86,7 @@
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          python -m pip install pytest python-igraph pytest-split
-          if ${{ matrix.python-version != '3.11' }}; then python -m pip install numba; fi
+          python -m pip install -U pytest python-igraph pytest-split
       - name: Install pandahub from TestPyPI
         if: ${{ inputs.upload_server == 'testpypi'}}
         run: |
2 changes: 1 addition & 1 deletion pandahub/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "0.2.7"
+__version__ = "0.2.8"
 
 from pandahub.lib.PandaHub import PandaHub, PandaHubError
 from pandahub.client.PandaHubClient import PandaHubClient
24 changes: 17 additions & 7 deletions pandahub/lib/PandaHub.py
@@ -516,7 +516,7 @@ def remove_user_from_project(self, email):
 
     def get_all_nets_metadata_from_db(self, project_id=None):
         if project_id:
-            self.set_active_project(project_id)
+            self.set_active_project_by_id(project_id)
         self.check_permission('read')
         db = self._get_project_database()
         return list(db['_networks'].find())
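
The one-word fix above matters because PandaHub's set_active_project() resolves a project by name, while set_active_project_by_id() expects the database id that this method actually receives. A minimal usage sketch of the corrected path; it assumes a running MongoDB, default connection settings, and a placeholder project id:

# Hedged sketch: `ph` and the id below are illustrative only.
from pandahub import PandaHub

ph = PandaHub()  # assumes default connection settings
nets = ph.get_all_nets_metadata_from_db(project_id="650f0000aaaa0000bbbb0000")
for net in nets:
    print(net.get("name"))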
@@ -1085,7 +1085,7 @@ def create_elements_in_db(self, net: Union[int,str], element_type: str, elements
             data.append({**elm_data, **var_data, "net_id": net_id})
         collection = self._collection_name_of_element(element_type)
         insert_result = db[collection].insert_many(data)
-        return [[z[0].update(_id=z[1]) for z in zip(data, insert_result.inserted_ids)]]
+        return [z[0] | {"_id": z[1]} for z in zip(data, insert_result.inserted_ids)]
 
     def _add_missing_defaults(self, db, net_id, element_type, element_data):
         func_str = f"create_{element_type}"
@@ -1109,7 +1109,7 @@ def _add_missing_defaults(self, db, net_id, element_type, element_data):
             std_type = element_data["std_type"]
             net_doc = db["_networks"].find_one({"_id": net_id})
             if net_doc is not None:
-            # std_types = json.loads(net_doc["data"]["std_types"], cls=io_pp.PPJSONDecoder)[element_type]
+                # std_types = json.loads(net_doc["data"]["std_types"], cls=io_pp.PPJSONDecoder)[element_type]
                 std_types = net_doc["data"]["std_types"]
                 if std_type in std_types:
                     element_data.update(std_types[std_type])
@@ -1238,7 +1238,7 @@ def bulk_write_to_db(self, data, collection_name="tasks", global_database=True,
                       for d in data]
         db[collection_name].bulk_write(operations)
 
-    def bulk_update_in_db(self, data, document_ids, collection_name="tasks", global_database=False):
+    def bulk_update_in_db(self, data, document_ids, collection_name="tasks", global_database=False, project_id=None):
         """
         Updates any number of documents in the database at once, according to their
         document_ids.
@@ -1260,6 +1260,8 @@ def bulk_update_in_db(self, data, document_ids, collection_name="tasks", global_
         None.
         """
+        if project_id:
+            self.set_active_project_by_id(project_id)
         if global_database:
             db = self._get_global_database()
         else:
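
The two added lines introduce a pattern repeated through the rest of this commit: an optional project_id that activates the target project inside the call. A hedged before/after sketch; `ph`, the payload, and all ids are placeholders, and the payload shape mirrors the $each subdocuments that bulk_update_timeseries_in_db (further down) passes in:

# Assumed: running MongoDB, existing PandaHub instance `ph`.
documents = [{"timeseries_data": {"$each": [{"t": 0, "v": 0.5}]}}]  # illustrative
document_ids = ["650f0000aaaa0000bbbb0001"]                         # placeholder

# Before this change: activate the project, then update.
ph.set_active_project_by_id("650f0000aaaa0000bbbb0000")
ph.bulk_update_in_db(documents, document_ids, collection_name="timeseries")

# After this change: a single call does both.
ph.bulk_update_in_db(documents, document_ids, collection_name="timeseries",
                     project_id="650f0000aaaa0000bbbb0000")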
@@ -1367,6 +1369,7 @@ def bulk_write_timeseries_to_db(self, timeseries, data_type,
                                     compress_ts_data=False,
                                     global_database=False,
                                     collection_name="timeseries",
+                                    project_id=None,
                                     **kwargs):
         """
         This function can be used to write a pandas DataFrame, containing multiple
Expand Down Expand Up @@ -1412,6 +1415,8 @@ def bulk_write_timeseries_to_db(self, timeseries, data_type,
"""
documents = []
if project_id:
self.set_active_project_by_id(project_id)
for col in timeseries.columns:
if meta_frame is not None:
args = {**kwargs, **meta_frame.loc[col]}
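
Because the loop above turns every DataFrame column into its own timeseries document, one call can now both select the project and write all columns. A sketch with made-up load profiles; `ph` and the ids are placeholders as before, and data_type is an illustrative label:

import pandas as pd

# Two made-up profiles; each column becomes one document in "timeseries".
ts = pd.DataFrame(
    {"load_1": [0.2, 0.4, 0.3], "load_2": [0.1, 0.3, 0.2]},
    index=pd.date_range("2023-09-20", periods=3, freq="h"),
)

doc_ids = ph.bulk_write_timeseries_to_db(
    ts,
    data_type="p_mw",                       # illustrative label
    project_id="650f0000aaaa0000bbbb0000",  # placeholder id
)
print(doc_ids)  # one inserted _id per column, per the return statement below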
@@ -1430,7 +1435,7 @@ def bulk_write_timeseries_to_db(self, timeseries, data_type,
         return [d["_id"] for d in documents]
 
     def update_timeseries_in_db(self, new_ts_content, document_id, collection_name="timeseries",
-                                global_database=False):
+                                global_database=False, project_id=None):
 
         """
         This function can be used to append a timeseries to an existing timeseries
@@ -1457,6 +1462,8 @@ def update_timeseries_in_db(self, new_ts_content, document_id, collection_name="
         None.
         """
+        if project_id:
+            self.set_active_project_by_id(project_id)
         if global_database:
             db = self._get_global_database()
         else:
@@ -1469,7 +1476,7 @@ def update_timeseries_in_db(self, new_ts_content, document_id, collection_name="
         )
         # logger.info("document updated in database")
 
-    def bulk_update_timeseries_in_db(self, new_ts_content, document_ids, collection_name="timeseries",
+    def bulk_update_timeseries_in_db(self, new_ts_content, document_ids, project_id=None, collection_name="timeseries",
                                      global_database=False):
 
         """
@@ -1499,13 +1506,16 @@ def bulk_update_timeseries_in_db(self, new_ts_content, document_ids, collection_
         -------
         None
         """
+        if project_id:
+            self.set_active_project_by_id(project_id)
+
         documents = []
         for i in range(len(new_ts_content.columns)):
             col = new_ts_content.columns[i]
             document = {}
             document["timeseries_data"] = {"$each": convert_timeseries_to_subdocuments(new_ts_content[col])}
             documents.append(document)
-        self.bulk_update_in_db(documents, document_ids, project=project,
+        self.bulk_update_in_db(documents, document_ids, project_id=project_id,
                                collection_name="timeseries", global_database=global_database)
 
         # logger.debug(f"{len(documents)} documents added to database")
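Beyond the project_id plumbing, the last changed line above fixes an outright bug: the old call passed project=project, a keyword bulk_update_in_db never accepted and a name that was never defined, so any call to bulk_update_timeseries_in_db raised a NameError. A sketch of appending fresh samples to the series written earlier; `ph` and the ids remain placeholders, and column order is assumed to pair positionally with the document ids:

import pandas as pd

# One new sample per series; columns pair positionally with doc_ids.
new_ts = pd.DataFrame(
    {"load_1": [0.5], "load_2": [0.6]},
    index=pd.date_range("2023-09-20 03:00", periods=1, freq="h"),
)

ph.bulk_update_timeseries_in_db(
    new_ts,
    doc_ids,                                # ids from the bulk write sketch above
    project_id="650f0000aaaa0000bbbb0000",  # placeholder id
)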
2 changes: 1 addition & 1 deletion setup.py
@@ -37,7 +37,7 @@
     name='pandahub',
     packages=find_packages(),
     url='https://github.com/e2nIEE/pandahub',
-    version='0.2.7',
+    version='0.2.8',
     include_package_data=True,
     long_description_content_type='text/markdown',
     zip_safe=False,
