  File "<private_file>", line 1750, in _call_impl
    return forward_call(*args, **kwargs)
  File "<private_file>", line 287, in forward
    vision_embeddings = self.get_multimodal_embeddings(**kwargs)
  File "<private_file>", line 230, in get_multimodal_embeddings
    vision_embeddings = self._process_image_input(image_input)
  File "<private_file>", line 340, in _process_image_input
    return self.vision_language_adapter(self.vision_encoder(image_input))
  File "<private_file>", line 1739, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "<private_file>", line 1750, in _call_impl
    return forward_call(*args, **kwargs)
  File "<private_file>", line 659, in forward
    out = self.transformer(patch_embeds, mask=mask, freqs_cis=freqs_cis)
  File "<private_file>", line 1739, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "<private_file>", line 1750, in _call_impl
    return forward_call(*args, **kwargs)
  File "<private_file>", line 562, in forward
    x = layer(x, mask=mask, freqs_cis=freqs_cis)
  File "<private_file>", line 1739, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "<private_file>", line 1750, in _call_impl
    return forward_call(*args, **kwargs)
  File "<private_file>", line 538, in forward
    r = self.attention.forward(self.attention_norm(x),
  File "<private_file>", line 518, in forward
    out = <redacted_value>(q, k, v, attn_bias=mask)
  File "<private_file>", line 306, in memory_efficient_attention
    return _memory_efficient_attention(
  File "<private_file>", line 467, in _memory_efficient_attention
    return _memory_efficient_attention_forward(
  File "<private_file>", line 486, in _memory_efficient_attention_forward
    op = _dispatch_fw(inp, False)
  File "<private_file>", line 135, in _dispatch_fw
    return _run_priority_list(
  File "<private_file>", line 76, in _run_priority_list
    raise NotImplementedError(msg)
NotImplementedError: No operator found for memory_efficient_attention_forward with inputs:
     query       : shape=(1, 512, 16, 64) (torch.float16)
     key         : shape=(1, 512, 16, 64) (torch.float16)
     value       : shape=(1, 512, 16, 64) (torch.float16)
     attn_bias   : <class '<private_test_file>'>
     p           : <sensitive_data>
<private_test_file> is not supported because:
    xFormers wasn't build with CUDA support
<private_test_file> is not supported because:
    xFormers wasn't build with CUDA support
    operator wasn't built - see python -m <redacted_value>.info for more info
[W530 12:09:53.564663342 ProcessGroupNCCL.cpp:1496] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see <private_test_file>.
We got this error on one machine, but the same code works locally. Any ideas?
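In case it helps to compare the two environments, here is a minimal check we can run on both the local machine and the failing one (a sketch; it assumes torch and xformers are importable in both environments and prints only version/build info):

# Environment comparison sketch: run on both machines and diff the output.
# The error above typically means the xFormers install on the failing machine
# has no CUDA kernels, or the PyTorch build there is CPU-only.
import torch

print("torch:", torch.__version__)
print("torch CUDA build:", torch.version.cuda)      # None => CPU-only PyTorch build
print("CUDA available:", torch.cuda.is_available())

try:
    import xformers
    print("xformers:", xformers.__version__)
except ImportError as exc:
    print("xformers not importable:", exc)

The error message itself also points at the xFormers info command (python -m xformers.info), which should list which attention operators were actually built; we can attach that output from both machines as well.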