benchmark: higher tolerance for RobertaForQuestionAnswering (#107376)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/107376
Approved by: https://github.com/kit1980, https://github.com/XiaobingSuper, https://github.com/jansel
ghstack dependencies: #107375
parent 1ea83f04d2
commit b9befc53a6
@@ -35,7 +35,7 @@ PLBartForConditionalGeneration,pass,0
 PegasusForCausalLM,pass,0
 PegasusForConditionalGeneration,pass,0
 RobertaForCausalLM,pass,0
-RobertaForQuestionAnswering,fail_accuracy,0
+RobertaForQuestionAnswering,pass,0
 Speech2Text2ForCausalLM,pass,0
 T5ForConditionalGeneration,pass,0
 T5Small,pass,0
@@ -37,7 +37,7 @@ PLBartForConditionalGeneration,pass,0
 PegasusForCausalLM,pass,0
 PegasusForConditionalGeneration,pass,0
 RobertaForCausalLM,pass,0
-RobertaForQuestionAnswering,fail_accuracy,0
+RobertaForQuestionAnswering,pass,0
 Speech2Text2ForCausalLM,pass,0
 T5ForConditionalGeneration,pass,0
 T5Small,pass,0
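The two CSV hunks above update expected-accuracy entries for the benchmark CI: each row appears to be a model name, an expected status, and a numeric count (presumably allowed graph breaks; that interpretation is an assumption). A minimal sketch of reading a file with that layout, using a hypothetical path rather than the repository's actual loader:

import csv

def load_expected(path):
    # Map model name -> (expected status, count) from a CSV shaped like the hunks above.
    expected = {}
    with open(path, newline="") as f:
        for row in csv.reader(f):
            if not row or row[0] == "name":  # skip blank lines and a possible header row
                continue
            model, status, count = row[0], row[1], int(row[2])
            expected[model] = (status, count)
    return expected

# After this commit, the lookup below would yield ("pass", 0) instead of ("fail_accuracy", 0).
# expected = load_expected("ci_expected_accuracy.csv")  # hypothetical filename
# print(expected["RobertaForQuestionAnswering"])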
@@ -163,12 +163,15 @@ SKIP_ACCURACY_CHECK_MODELS = {
 }
 
 
-REQUIRE_HIGHER_TOLERANCE = {
+REQUIRE_HIGHER_TOLERANCE_TRAINING = {
     "MT5ForConditionalGeneration",
     # AlbertForQuestionAnswering fails in CI GCP A100 but error does not seem
     # harmful.
     "AlbertForQuestionAnswering",
 }
+REQUIRE_HIGHER_TOLERANCE_INFERENCE = {
+    "RobertaForQuestionAnswering",
+}
 
 
 SKIP_FOR_CPU = {
@@ -530,10 +533,13 @@ class HuggingfaceRunner(BenchmarkRunner):
     def get_tolerance_and_cosine_flag(self, is_training, current_device, name):
         cosine = self.args.cosine
         if is_training:
-            if name in REQUIRE_HIGHER_TOLERANCE:
+            if name in REQUIRE_HIGHER_TOLERANCE_TRAINING:
                 return 2e-2, cosine
             else:
                 return 1e-2, cosine
         else:
+            if name in REQUIRE_HIGHER_TOLERANCE_INFERENCE:
+                return 4e-3, cosine
             return 1e-3, cosine
 
     def compute_loss(self, pred):
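For context, the (tolerance, cosine) pair returned by get_tolerance_and_cosine_flag feeds the harness's accuracy comparison between the eager reference and the compiled result. The sketch below is only an illustration of how such a pair is typically applied, with hypothetical ref/res tensors, not the harness's actual comparison code:

import torch

def accuracy_ok(ref, res, tol, cosine):
    # When args.cosine is set, compare direction only; magnitude drift is tolerated.
    if cosine:
        sim = torch.nn.functional.cosine_similarity(
            ref.flatten().float(), res.flatten().float(), dim=0
        )
        return bool(sim > 1 - tol)
    # Otherwise require elementwise closeness within the per-model tolerance,
    # e.g. 4e-3 for RobertaForQuestionAnswering inference after this change.
    return torch.allclose(ref.float(), res.float(), rtol=tol, atol=tol)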