Commit

after ruff check --add-noqa
jinyan1214 committed Aug 14, 2024
1 parent a50555d commit c81daeb
Showing 91 changed files with 1,253 additions and 959 deletions.
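For context, ruff check --add-noqa rewrites the checked files in place, appending a # noqa: <RULE> directive to each line that currently violates a lint rule, or extending an existing directive with the new codes, so outstanding violations are suppressed rather than fixed. Below is a minimal sketch of the effect, using the directionToDof function from the CFDEvent.py diff later in this commit (the other noqa codes were already present in the repository):

# Before: ruff's DOC201 rule flags the return value as undocumented in the docstring.
def directionToDof(direction):  # noqa: N802
    """Converts direction to degree of freedom"""  # noqa: D400, D401
    directioMap = {'X': 1, 'Y': 2, 'Z': 3}  # noqa: N806
    return directioMap[direction]

# After ruff check --add-noqa, the same return line carries an added suppression:
#     return directioMap[direction]  # noqa: DOC201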
8 changes: 4 additions & 4 deletions modules/Workflow/computeResponseSpectrum.py
@@ -23,7 +23,7 @@ def convert_accel_units(acceleration, from_, to_='cm/s/s'): # noqa: C901
acceleration = np.asarray(acceleration)
if from_ == 'g':
if to_ == 'g':
-return acceleration
+return acceleration # noqa: DOC201
if to_ in m_sec_square:
return acceleration * g
if to_ in cm_sec_square:
@@ -70,7 +70,7 @@ def get_velocity_displacement(
velocity = time_step * cumtrapz(acceleration, initial=0.0)
if displacement is None:
displacement = time_step * cumtrapz(velocity, initial=0.0)
-return velocity, displacement
+return velocity, displacement # noqa: DOC201


class NewmarkBeta:
@@ -160,7 +160,7 @@ def run(self):
'PGV': np.max(np.fabs(self.velocity)),
'PGD': np.max(np.fabs(self.displacement)),
}
-return self.response_spectrum, time_series, accel, vel, disp
+return self.response_spectrum, time_series, accel, vel, disp # noqa: DOC201

def _newmark_beta(self, omega, cval, kval): # noqa: ARG002
"""Newmark-beta integral
@@ -216,4 +216,4 @@ def _newmark_beta(self, omega, cval, kval): # noqa: ARG002
disp[j, :] = delta_u + disp[j - 1, :]
a_t[j, :] = ground_acc[j] + accel[j, :]

-return accel, vel, disp, a_t
+return accel, vel, disp, a_t # noqa: DOC201
44 changes: 22 additions & 22 deletions modules/Workflow/createGM4BIM.py
@@ -75,7 +75,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901
unit_time = output_units.get('time', 'sec')
f_time = globals().get(unit_time, None)
if f_time is None:
-raise ValueError(f'Specified time unit not recognized: {unit_time}') # noqa: DOC501, EM102, TRY003
+raise ValueError(f'Specified time unit not recognized: {unit_time}') # noqa: DOC501, EM102, RUF100, TRY003

scale_factors = {}

@@ -88,7 +88,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901
# get the scale factor to standard units
f_in = globals().get(input_unit, None)
if f_in is None:
-raise ValueError( # noqa: DOC501, TRY003
+raise ValueError( # noqa: DOC501, RUF100, TRY003
f'Input unit for event files not recognized: {input_unit}' # noqa: EM102
)

@@ -98,7 +98,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901
unit_type = base_unit_type

if unit_type is None:
-raise ValueError(f'Failed to identify unit type: {input_unit}') # noqa: DOC501, EM102, TRY003
+raise ValueError(f'Failed to identify unit type: {input_unit}') # noqa: DOC501, EM102, RUF100, TRY003

# the output unit depends on the unit type
if unit_type == 'acceleration':
@@ -111,7 +111,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901
f_out = 1.0 / f_length

else:
-raise ValueError( # noqa: DOC501, TRY003
+raise ValueError( # noqa: DOC501, RUF100, TRY003
f'Unexpected unit type in workflow: {unit_type}' # noqa: EM102
)

@@ -120,7 +120,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901

scale_factors.update({input_name: f_scale})

-return scale_factors
+return scale_factors # noqa: DOC201


def createFilesForEventGrid(inputDir, outputDir, removeInputDir): # noqa: C901, D103, N802, N803, PLR0914, PLR0915
@@ -410,28 +410,28 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir): # noqa: C901,
m_pgd_y = 0.0
s_pgd_y = 0.0
# add to dictionary
-dict_im[('type', 'loc', 'dir', 'stat')].append(int(siteID))
+dict_im[('type', 'loc', 'dir', 'stat')].append(int(siteID)) # noqa: RUF031
# pga
-dict_im[('PGA', 0, 1, 'median')].append(m_pga_x)
-dict_im[('PGA', 0, 1, 'beta')].append(s_pga_x)
-dict_im[('PGA', 0, 2, 'median')].append(m_pga_y)
-dict_im[('PGA', 0, 2, 'beta')].append(s_pga_y)
+dict_im[('PGA', 0, 1, 'median')].append(m_pga_x) # noqa: RUF031
+dict_im[('PGA', 0, 1, 'beta')].append(s_pga_x) # noqa: RUF031
+dict_im[('PGA', 0, 2, 'median')].append(m_pga_y) # noqa: RUF031
+dict_im[('PGA', 0, 2, 'beta')].append(s_pga_y) # noqa: RUF031
# pgv
-dict_im[('PGV', 0, 1, 'median')].append(m_pgv_x)
-dict_im[('PGV', 0, 1, 'beta')].append(s_pgv_x)
-dict_im[('PGV', 0, 2, 'median')].append(m_pgv_y)
-dict_im[('PGV', 0, 2, 'beta')].append(s_pgv_y)
+dict_im[('PGV', 0, 1, 'median')].append(m_pgv_x) # noqa: RUF031
+dict_im[('PGV', 0, 1, 'beta')].append(s_pgv_x) # noqa: RUF031
+dict_im[('PGV', 0, 2, 'median')].append(m_pgv_y) # noqa: RUF031
+dict_im[('PGV', 0, 2, 'beta')].append(s_pgv_y) # noqa: RUF031
# pgd
-dict_im[('PGD', 0, 1, 'median')].append(m_pgd_x)
-dict_im[('PGD', 0, 1, 'beta')].append(s_pgd_x)
-dict_im[('PGD', 0, 2, 'median')].append(m_pgd_y)
-dict_im[('PGD', 0, 2, 'beta')].append(s_pgd_y)
+dict_im[('PGD', 0, 1, 'median')].append(m_pgd_x) # noqa: RUF031
+dict_im[('PGD', 0, 1, 'beta')].append(s_pgd_x) # noqa: RUF031
+dict_im[('PGD', 0, 2, 'median')].append(m_pgd_y) # noqa: RUF031
+dict_im[('PGD', 0, 2, 'beta')].append(s_pgd_y) # noqa: RUF031
for jj, Ti in enumerate(periods): # noqa: N806
cur_sa = f'SA({Ti}s)'
-dict_im[(cur_sa, 0, 1, 'median')].append(m_psa_x[jj])
-dict_im[(cur_sa, 0, 1, 'beta')].append(s_psa_x[jj])
-dict_im[(cur_sa, 0, 2, 'median')].append(m_psa_y[jj])
-dict_im[(cur_sa, 0, 2, 'beta')].append(s_psa_y[jj])
+dict_im[(cur_sa, 0, 1, 'median')].append(m_psa_x[jj]) # noqa: RUF031
+dict_im[(cur_sa, 0, 1, 'beta')].append(s_psa_x[jj]) # noqa: RUF031
+dict_im[(cur_sa, 0, 2, 'median')].append(m_psa_y[jj]) # noqa: RUF031
+dict_im[(cur_sa, 0, 2, 'beta')].append(s_psa_y[jj]) # noqa: RUF031

# aggregate
for cur_key, cur_value in dict_im.items():
32 changes: 16 additions & 16 deletions modules/Workflow/whale/main.py
@@ -310,7 +310,7 @@ def create_command(command_list, enforced_python=None):
for command_arg in command_list[1:]:
command += f'"{command_arg}" '

-return command
+return command # noqa: DOC201


def run_command(command):
@@ -357,7 +357,7 @@ def run_command(command):

py_script.main(arg_list)

-return '', ''
+return '', '' # noqa: DOC201

else: # noqa: RET505
# fmk with Shell=True not working on older windows machines, new approach needed for quoted command .. turn into a list
@@ -668,7 +668,7 @@ def get_command_list(self, app_path, force_posix=False): # noqa: FBT002, C901

# pp.pprint(arg_list)

-return arg_list
+return arg_list # noqa: DOC201


class Workflow: # noqa: PLR0904
@@ -857,7 +857,7 @@ def _register_app_type(self, app_type, app_dict, sub_app=''): # noqa: C901

if app_type_obj == None: # noqa: E711
err = 'The application ' + app_type + ' is not found in the app registry'
-raise WorkFlowInputError(err) # noqa: DOC501
+raise WorkFlowInputError(err) # noqa: DOC501, RUF100

# Finally check to see if the app registry contains the provided application
if app_type_obj.get(app_in) == None: # noqa: E711
@@ -866,7 +866,7 @@ def _register_app_type(self, app_type, app_dict, sub_app=''): # noqa: C901
+ app_in
)
print('Error', app_in) # noqa: T201
-raise WorkFlowInputError(err) # noqa: DOC501
+raise WorkFlowInputError(err) # noqa: DOC501, RUF100

appData = app_dict['ApplicationData'] # noqa: N806
#
@@ -878,7 +878,7 @@ def _register_app_type(self, app_type, app_dict, sub_app=''): # noqa: C901

# Check if the app object was created successfully
if app_object is None:
-raise WorkFlowInputError(f'Application deep copy failed for {app_type}') # noqa: DOC501, EM102, TRY003
+raise WorkFlowInputError(f'Application deep copy failed for {app_type}') # noqa: DOC501, EM102, RUF100, TRY003

# only assign the app to the workflow if it has an executable
if app_object.rel_path is None:
@@ -1081,7 +1081,7 @@ def _parse_inputs(self): # noqa: C901
# Events are special because they are in an array
if 'Events' in requested_apps:
if len(requested_apps['Events']) > 1:
-raise WorkFlowInputError( # noqa: DOC501, TRY003
+raise WorkFlowInputError( # noqa: DOC501, RUF100, TRY003
'Currently, WHALE only supports a single event.' # noqa: EM101
)
for event in requested_apps['Events'][
@@ -1104,7 +1104,7 @@ def _parse_inputs(self): # noqa: C901
)

if app_object is None:
-raise WorkFlowInputError( # noqa: DOC501
+raise WorkFlowInputError( # noqa: DOC501, RUF100
'Application entry missing for {}'.format('Events') # noqa: EM103
)

@@ -1114,12 +1114,12 @@ def _parse_inputs(self): # noqa: C901
self.workflow_apps['Event'] = app_object

else:
-raise WorkFlowInputError( # noqa: DOC501, TRY003
+raise WorkFlowInputError( # noqa: DOC501, RUF100, TRY003
'Currently, only earthquake and wind events are supported. ' # noqa: EM102
f'EventClassification must be Earthquake, not {eventClassification}'
)
else:
-raise WorkFlowInputError('Need Event Classification') # noqa: DOC501, EM101, TRY003
+raise WorkFlowInputError('Need Event Classification') # noqa: DOC501, EM101, RUF100, TRY003

# Figure out what types of assets are coming into the analysis
assetObjs = requested_apps.get('Assets', None) # noqa: N806
@@ -1130,7 +1130,7 @@ def _parse_inputs(self): # noqa: C901

# Check if asset list is not empty
if len(assetObjs) == 0:
-raise WorkFlowInputError('The provided asset object is empty') # noqa: DOC501, EM101, TRY003
+raise WorkFlowInputError('The provided asset object is empty') # noqa: DOC501, EM101, RUF100, TRY003

# Iterate through the asset objects
for assetObj in assetObjs: # noqa: N806
@@ -1316,7 +1316,7 @@ def create_asset_files(self):

log_div()

-return assetFilesList
+return assetFilesList # noqa: DOC201

def augment_asset_files(self): # noqa: C901
"""Short description
@@ -1504,7 +1504,7 @@ def augment_asset_files(self): # noqa: C901
)
log_div()

-return assetFilesList
+return assetFilesList # noqa: DOC201

def perform_system_performance_assessment(self, asset_type):
"""For an asset type run the system level performance assessment application
@@ -1525,7 +1525,7 @@ def perform_system_performance_assessment(self, asset_type):
prepend_timestamp=False,
)
log_div()
-return False
+return False # noqa: DOC201

if performance_app.rel_path == None: # noqa: E711
log_msg(
@@ -1905,7 +1905,7 @@ def init_simdir(self, asst_id=None, AIM_file_path='AIM.json'): # noqa: C901, N8
prepend_timestamp=False,
)
log_div()
-return dst
+return dst # noqa: DOC201

def cleanup_simdir(self, asst_id):
"""Short description
@@ -2730,7 +2730,7 @@ def estimate_losses( # noqa: C901
],
)
if ('PID', '0') in df_res.columns:
-del df_res[('PID', '0')]
+del df_res[('PID', '0')] # noqa: RUF031

# store the EDP statistics in the output DF
for col in np.transpose(col_info):
12 changes: 6 additions & 6 deletions modules/common/simcenter_common.py
@@ -237,7 +237,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901
unit_time = output_units.get('time', 'sec')
f_time = globals().get(unit_time, None)
if f_time is None:
-raise ValueError(f'Specified time unit not recognized: {unit_time}') # noqa: DOC501, EM102, TRY003
+raise ValueError(f'Specified time unit not recognized: {unit_time}') # noqa: DOC501, EM102, RUF100, TRY003

scale_factors = {}

@@ -253,15 +253,15 @@ def get_scale_factors(input_units, output_units): # noqa: C901

f_in = globals().get(input_unit, None)
if f_in is None:
-raise ValueError(f'Input unit not recognized: {input_unit}') # noqa: DOC501, EM102, TRY003
+raise ValueError(f'Input unit not recognized: {input_unit}') # noqa: DOC501, EM102, RUF100, TRY003

unit_type = None
for base_unit_type, unit_set in globals()['unit_types'].items():
if input_unit in unit_set:
unit_type = base_unit_type

if unit_type is None:
-raise ValueError(f'Failed to identify unit type: {input_unit}') # noqa: DOC501, EM102, TRY003
+raise ValueError(f'Failed to identify unit type: {input_unit}') # noqa: DOC501, EM102, RUF100, TRY003

# the output unit depends on the unit type
if unit_type == 'acceleration':
@@ -274,7 +274,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901
f_out = 1.0 / f_length

else:
-raise ValueError( # noqa: DOC501, TRY003
+raise ValueError( # noqa: DOC501, RUF100, TRY003
f'Unexpected unit type in workflow: {unit_type}' # noqa: EM102
)

@@ -283,7 +283,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901

scale_factors.update({input_name: f_scale})

-return scale_factors
+return scale_factors # noqa: DOC201


def get_unit_bases(input_units):
@@ -306,4 +306,4 @@ def get_unit_bases(input_units):
input_unit_bases = cur_unit_bases
break

-return input_unit_bases
+return input_unit_bases # noqa: DOC201
2 changes: 1 addition & 1 deletion modules/createEVENT/CFDEvent/CFDEvent.py
@@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802
"""Converts direction to degree of freedom""" # noqa: D400, D401
directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806

-return directioMap[direction]
+return directioMap[direction] # noqa: DOC201


def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803
Expand Down
2 changes: 1 addition & 1 deletion modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py
@@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802
"""Converts direction to degree of freedom""" # noqa: D400, D401
directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806

-return directioMap[direction]
+return directioMap[direction] # noqa: DOC201


def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803
Expand Down
14 changes: 7 additions & 7 deletions modules/createEVENT/EmptyDomainCFD/post_process_output.py
@@ -88,7 +88,7 @@ def readPressureProbes(fileName): # noqa: N802, N803
time = np.asarray(time, dtype=np.float32)
p = np.asarray(p, dtype=np.float32)

-return probes, time, p
+return probes, time, p # noqa: DOC201


def read_pressure_data(file_names):
@@ -291,7 +291,7 @@ def read_openFoam_scalar_field(file_name): # noqa: N802

sField = np.asarray(sField, dtype=np.float32) # noqa: N806

-return sField # noqa: RET504
+return sField # noqa: DOC201, RET504


def read_openFoam_vector_field(file_name): # noqa: N802
@@ -312,7 +312,7 @@ def read_openFoam_vector_field(file_name): # noqa: N802

vField = np.asarray(vField, dtype=np.float32) # noqa: N806

-return vField # noqa: RET504
+return vField # noqa: DOC201, RET504


def read_openFoam_tensor_field(file_name): # noqa: N802
@@ -340,7 +340,7 @@ def read_openFoam_tensor_field(file_name): # noqa: N802

vField = np.asarray(vField, dtype=np.float32) # noqa: N806

-return vField # noqa: RET504
+return vField # noqa: DOC201, RET504


def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802
@@ -367,7 +367,7 @@ def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802

vField = np.asarray(vField, dtype=np.float32) # noqa: N806

-return vField # noqa: RET504
+return vField # noqa: DOC201, RET504


def read_velocity_data(path):
@@ -462,7 +462,7 @@ def read_velocity_probes(fileName): # noqa: N803
time = np.asarray(time, dtype=np.float32)
U = np.asarray(U, dtype=np.float32) # noqa: N806

-return probes, time, U
+return probes, time, U # noqa: DOC201


def calculate_length_scale(u, uav, dt, min_corr=0.0):
@@ -481,7 +481,7 @@ def calculate_length_scale(u, uav, dt, min_corr=0.0):

L = uav * np.trapz(corr, dx=dt) # noqa: NPY201, N806

-return L # noqa: RET504
+return L # noqa: DOC201, RET504


def psd(x, dt, nseg): # noqa: F811