Skip to content

Commit ef92dc1

Browse files
[MMM-19595] Always use newer moderations library pipeline framework (#1490)
1 parent e4117bc commit ef92dc1

File tree

2 files changed: +96 −256 lines changed

custom_model_runner/datarobot_drum/drum/adapters/model_adapters/python_model_adapter.py

Lines changed: 2 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -107,9 +107,6 @@ def __init__(self, model_dir, target_type=None):
107107
self._custom_task_class = None
108108
self._custom_task_class_instance = None
109109
self._mod_pipeline = None
110-
self._moderation_pipeline = None
111-
self._moderation_score_hook = None
112-
self._moderation_chat_hook = None
113110

114111
if target_type in (
115112
TargetType.TEXT_GENERATION,
@@ -158,20 +155,6 @@ def _load_moderation_hooks(self):
158155
# use the 'moderation_pipeline_factory()' to determine if moderations has integrated pipeline
159156
if hasattr(mod_module, "moderation_pipeline_factory"):
160157
self._mod_pipeline = mod_module.moderation_pipeline_factory(self._target_type.value)
161-
# use the 'create_pipeline' to determine if using version that supports VDB
162-
elif hasattr(mod_module, "create_pipeline"):
163-
self._moderation_score_hook = mod_module.get_moderations_fn(
164-
self._target_type.value, CustomHooks.SCORE
165-
)
166-
self._moderation_chat_hook = mod_module.get_moderations_fn(
167-
self._target_type.value, CustomHooks.CHAT
168-
)
169-
self._moderation_pipeline = mod_module.create_pipeline(self._target_type.value)
170-
elif self._target_type in (TargetType.TEXT_GENERATION, TargetType.AGENTIC_WORKFLOW):
171-
# older versions only support textgeneration -- access functions directly from module
172-
self._moderation_score_hook = getattr(mod_module, "guard_score_wrapper", None)
173-
self._moderation_chat_hook = getattr(mod_module, "guard_chat_wrapper", None)
174-
self._moderation_pipeline = mod_module.init()
175158
else:
176159
self._logger.warning(f"No support of {self._target_type} target in moderations.")
177160

@@ -640,18 +623,6 @@ def _predict_legacy_drum(self, data, model, **kwargs) -> RawPredictResponse:
640623
predictions_df.rename(
641624
columns={"completion": self._target_name}, inplace=True
642625
)
643-
elif self._moderation_pipeline and self._moderation_score_hook:
644-
predictions_df = self._moderation_score_hook(
645-
data,
646-
model,
647-
self._moderation_pipeline,
648-
score_fn,
649-
**kwargs,
650-
)
651-
if self._target_name not in predictions_df:
652-
predictions_df.rename(
653-
columns={"completion": self._target_name}, inplace=True
654-
)
655626
else:
656627
# noinspection PyCallingNonCallable
657628
predictions_df = score_fn(data, model, **kwargs)
@@ -792,16 +763,8 @@ def chat(self, completion_create_params, model, association_id):
792763
chat_fn = self._custom_hooks.get(CustomHooks.CHAT)
793764
if self._mod_pipeline:
794765
return self._mod_pipeline.chat(completion_create_params, model, chat_fn, association_id)
795-
elif self._moderation_pipeline and self._moderation_chat_hook:
796-
return self._moderation_chat_hook(
797-
completion_create_params,
798-
model,
799-
self._moderation_pipeline,
800-
chat_fn,
801-
association_id,
802-
)
803-
else:
804-
return chat_fn(completion_create_params, model)
766+
767+
return chat_fn(completion_create_params, model)
805768

806769
def get_supported_llm_models(self, model):
807770
"""

0 commit comments

Comments (0)