
Commit

Merge branch 'stable' of https://github.com/Lexi-BU/lxi_gui into stable
lexibu committed Jan 7, 2025
2 parents 6514a2d + 88d71be commit a51db87
Showing 8 changed files with 951 additions and 111 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -146,4 +146,7 @@ poetry.lock

.flake8

# Ignore the .ds store file
.DS_Store

documents_from_ff
24 changes: 12 additions & 12 deletions codes/luigi.cfg
@@ -3,24 +3,24 @@ x_min_entry = -5
x_max_entry = 5
y_min_entry = -5
y_max_entry = 5
hist_bin_entry = 200
c_min_entry = 1
c_max_entry = None
hist_bin_entry = 100
c_min_entry = 5
c_max_entry = 500
density_status = False
norm_type = log
norm_type = linear
unit_type = mcp
v_min_thresh_entry = 1.2
v_max_thresh_entry = 3.4
v_sum_min_thresh_entry = 5
v_sum_max_thresh_entry = 7
cut_status = False
v_min_thresh_entry = 0
v_max_thresh_entry = 4
v_sum_min_thresh_entry = 0
v_sum_max_thresh_entry = 20
cut_status = True
curve_fit_status = False
lin_corr_status = True
non_lin_corr_status = True
non_lin_corr_status = False
cmap = viridis

[time_options]
start_time = 2023-01-01 00:00:00
end_time = 2026-12-31 00:00:00
start_time = 2023-11-16 10:00:00
end_time = 2024-11-16 16:32:52
time_threshold = 60
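
For context, luigi.cfg is an INI-style file, so these settings can be read with Python's standard configparser. The sketch below is not from this commit and the actual loading code in lxi_gui is not shown in this diff; it only illustrates reading the [time_options] section updated above.

```python
import configparser

# Minimal sketch (not from this commit): read the [time_options] section of
# codes/luigi.cfg with the standard-library configparser.
config = configparser.ConfigParser()
config.read("codes/luigi.cfg")

time_opts = config["time_options"]
start_time = time_opts.get("start_time")             # "2023-11-16 10:00:00" after this change
end_time = time_opts.get("end_time")                 # "2024-11-16 16:32:52" after this change
time_threshold = time_opts.getint("time_threshold")  # 60 (units not stated in the file)
```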

38 changes: 20 additions & 18 deletions codes/lxi_file_read_funcs.py
@@ -2,6 +2,7 @@
import datetime
import importlib
import os
import platform
import logging
import struct
from pathlib import Path
@@ -407,15 +408,15 @@ def read_binary_data_sci(

# Split the file name in a folder and a file name
# Format filenames and folder names for the different operating systems
if os.name == "posix":
if platform.system() == "Linux":
output_file_name = os.path.basename(os.path.normpath(in_file_name)).split(".")[0] + "_sci_output.csv"
output_folder_name = os.path.dirname(os.path.normpath(in_file_name)) + "/processed_data/sci"
save_file_name = output_folder_name + "/" + output_file_name
elif os.name == "nt":
elif platform.system() == "Windows":
output_file_name = os.path.basename(os.path.normpath(in_file_name)).split(".")[0] + "_sci_output.csv"
output_folder_name = os.path.dirname(os.path.normpath(in_file_name)) + "\\processed_data\\sci"
save_file_name = output_folder_name + "\\" + output_file_name
elif os.name == "darwin":
elif platform.system() == "Darwin":
output_file_name = os.path.basename(os.path.normpath(in_file_name)).split(".")[0] + "_sci_output.csv"
output_folder_name = os.path.dirname(os.path.normpath(in_file_name)) + "/processed_data/sci"
save_file_name = output_folder_name + "/" + output_file_name
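
The hunk above swaps os.name checks ("posix"/"nt"/"darwin") for platform.system() ("Linux"/"Windows"/"Darwin"); note that os.name is "posix" on macOS too, so the old "darwin" branch could never run. Since the three branches differ only in the path separator, an alternative is to let os.path supply the separator. This is a sketch under that observation, not code from the commit:

```python
import os

def sci_output_path(in_file_name: str) -> str:
    # Sketch of a branch-free equivalent (not part of this commit):
    # os.path.join and os.path.dirname already use the separator that is
    # native to the platform the code runs on, so no per-OS branches are needed.
    base = os.path.basename(os.path.normpath(in_file_name)).split(".")[0]
    folder = os.path.join(os.path.dirname(os.path.normpath(in_file_name)),
                          "processed_data", "sci")
    return os.path.join(folder, base + "_sci_output.csv")
```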
@@ -850,7 +851,8 @@ def read_binary_data_hk(
for key in df.keys():
for ii in range(1, len(df[key])):
if np.isnan(df[key][ii]):
df[key][ii] = df[key][ii - 1]
# df[key][ii] = df[key][ii - 1]
df.loc[ii, key] = df.loc[ii - 1, key]

# Set the date column to the Date_datetime
df["Date"] = Date_datetime
@@ -881,15 +883,15 @@ def read_binary_data_hk(
df.set_index("Date", inplace=True, drop=False)
# Split the file name in a folder and a file name
# Format filenames and folder names for the different operating systems
if os.name == "posix":
if platform.system() == "Linux":
output_folder_name = os.path.dirname(os.path.normpath(in_file_name)) + "/processed_data/hk"
output_file_name = os.path.basename(os.path.normpath(in_file_name)).split(".")[0] + "_hk_output.csv"
save_file_name = output_folder_name + "/" + output_file_name
elif os.name == "nt":
elif platform.system() == "Windows":
output_folder_name = os.path.dirname(os.path.normpath(in_file_name)) + "\\processed_data\\hk"
output_file_name = os.path.basename(os.path.normpath(in_file_name)).split(".")[0] + "_hk_output.csv"
save_file_name = output_folder_name + "\\" + output_file_name
elif os.name == "darwin":
elif platform.system() == "Darwin":
output_folder_name = os.path.dirname(os.path.normpath(in_file_name)) + "/processed_data/hk"
output_file_name = os.path.basename(os.path.normpath(in_file_name)).split(".")[0] + "_hk_output.csv"
save_file_name = output_folder_name + "/" + output_file_name
@@ -916,11 +918,11 @@ def open_file_sci(start_time=None, end_time=None):
)

# Get the file name from the file path for different operating systems
if os.name == "posix":
if platform.system() == "Linux":
file_name_sci = file_val.split("/")[-1]
elif os.name == "nt":
elif platform.system() == "Windows":
file_name_sci = file_val.split("\\")[-1]
elif os.name == "darwin":
elif platform.system() == "Darwin":
file_name_sci = file_val.split("/")[-1]
else:
raise OSError("Operating system not supported.")
@@ -945,11 +947,11 @@ def open_file_hk(start_time=None, end_time=None):
)

# Get the file name from the file path for different operating systems
if os.name == "posix":
if platform.system() == "Linux":
file_name_hk = file_val.split("/")[-1]
elif os.name == "nt":
elif platform.system() == "Windows":
file_name_hk = file_val.split("\\")[-1]
elif os.name == "darwin":
elif platform.system() == "Darwin":
file_name_hk = file_val.split("/")[-1]
else:
raise OSError("Operating system not supported.")
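
In both open_file_sci and open_file_hk, the platform check only decides whether to split the selected path on "/" or "\\". A separator-agnostic sketch, offered as context rather than as code from this commit:

```python
import os

def file_name_from_path(file_val: str) -> str:
    # os.path.basename applies the current platform's separator rules, so it
    # matches the per-OS branches above without the if/elif chain.
    return os.path.basename(file_val)

# e.g. file_name_hk = file_name_from_path(file_val)
```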
@@ -1451,7 +1453,7 @@ def read_binary_file(file_val=None, t_start=None, t_end=None, multiple_files=Fal
):
raise ValueError(
"when multiple_files is True, both t_start and t_end must either be"
f"None or a valid time value. The vlaues provided are t_start ="
f"None or a valid time value. The values provided are t_start ="
f"{t_start} and t_end = {t_end}."
)
# If both t_start and t_end are None, raise a warning stating that the times are set to none
@@ -1467,7 +1469,6 @@ def read_binary_file(file_val=None, t_start=None, t_end=None, multiple_files=Fal
# Convert t_start and t_end from string to datetime in UTC timezone
t_start = pd.to_datetime(t_start, utc=True)
t_end = pd.to_datetime(t_end, utc=True)

try:
# Convert t_start and t_end from string to unix time in seconds in UTC timezone
t_start_unix = t_start.timestamp()
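
For reference, the two conversions in this hunk chained together, using the new default start_time from luigi.cfg as an illustrative input:

```python
import pandas as pd

# Parse a timestamp string as UTC, then express it as Unix seconds.
t_start = pd.to_datetime("2023-11-16 10:00:00", utc=True)
t_start_unix = t_start.timestamp()  # 1700128800.0 seconds since 1970-01-01 UTC
```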
@@ -1485,6 +1486,7 @@ def read_binary_file(file_val=None, t_start=None, t_end=None, multiple_files=Fal

# Make sure that file_val is a directory
if not os.path.isdir(file_val):
print(f"\n \x1b[1;31;255m WARNING: {file_val} is not a directory. \x1b[0m")
raise ValueError("file_val should be a directory.")

# Get the names of all the files in the directory with*.dat or *.txt extension
@@ -1555,7 +1557,7 @@ def read_binary_file(file_val=None, t_start=None, t_end=None, multiple_files=Fal
os.makedirs(save_dir)

# Get the file name based on the os path
if os.name == "nt":
if platform.system() == "Windows":
file_name_hk = save_dir + "\\processed_data\\hk\\" + \
file_name_hk_list[0].split("\\")[-1].split('.')[0].split('_')[0] + '_' + \
file_name_hk_list[0].split("\\")[-1].split('.')[0].split('_')[1] + '_' + \
@@ -1571,7 +1573,7 @@ def read_binary_file(file_val=None, t_start=None, t_end=None, multiple_files=Fal
file_name_hk_list[0].split("\\")[-1].split('.')[0].split('_')[3] + '_' + \
file_name_sci_list[-1].split("\\")[-1].split('.')[0].split('_')[-4] + '_' + \
file_name_sci_list[-1].split("\\")[-1].split('.')[0].split('_')[-3] + '_sci_output.csv'
elif os.name == "posix":
elif platform.system() == "Linux":
file_name_hk = save_dir + "/processed_data/hk/" + \
file_name_hk_list[0].split("/")[-1].split('.')[0].split('_')[0] + '_' + \
file_name_hk_list[0].split("/")[-1].split('.')[0].split('_')[1] + '_' + \
@@ -1587,7 +1589,7 @@ def read_binary_file(file_val=None, t_start=None, t_end=None, multiple_files=Fal
file_name_hk_list[0].split("/")[-1].split('.')[0].split('_')[3] + '_' + \
file_name_sci_list[-1].split("/")[-1].split('.')[0].split('_')[-4] + '_' + \
file_name_sci_list[-1].split("/")[-1].split('.')[0].split('_')[-3] + '_sci_output.csv'
elif os.name == "darwin":
elif platform.system() == "Darwin":
file_name_hk = save_dir + "/processed_data/hk/" + \
file_name_hk_list[0].split("/")[-1].split('.')[0].split('_')[0] + '_' + \
file_name_hk_list[0].split("/")[-1].split('.')[0].split('_')[1] + '_' + \
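
The truncated hunks above assemble the merged output file name from underscore-separated pieces of the first housekeeping file and the last science file. A compact restatement as a hypothetical helper, with the token layout inferred from the splits shown (Path.stem assumes a single extension):

```python
from pathlib import Path

def combined_output_name(first_file: str, last_file: str, suffix: str) -> str:
    # Hypothetical helper mirroring the concatenation above: keep the first four
    # underscore-separated tokens of the first file's stem plus the fourth- and
    # third-from-last tokens of the last file's stem, then append the suffix.
    first_tokens = Path(first_file).stem.split("_")
    last_tokens = Path(last_file).stem.split("_")
    return "_".join(first_tokens[:4] + last_tokens[-4:-2]) + suffix

# e.g. combined_output_name(file_name_hk_list[0], file_name_sci_list[-1], "_hk_output.csv")
```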
