Related to #32888, but will not fix the issue.
Turns out these assertions are wrong (I'm not sure whether they were ever correct).
I tested with this code:
```
request = LanguageModelRequest {
    messages: vec![
        LanguageModelRequestMessage {
            role: Role::User,
            content: vec![MessageContent::Text("Give me 10 jokes".to_string())],
            cache: false,
        },
        LanguageModelRequestMessage {
            role: Role::Assistant,
            content: vec![MessageContent::Text("Sure, here are 10 jokes:".to_string())],
            cache: false,
        },
    ],
    ..request
};
```
The API happily accepted this and Claude proceeded to tell me 10 jokes.
Release Notes:
- N/A
@@ -267,23 +267,6 @@ impl LanguageModel for CopilotChatLanguageModel {
LanguageModelCompletionError,
>,
> {
-        if let Some(message) = request.messages.last() {
-            if message.contents_empty() {
-                const EMPTY_PROMPT_MSG: &str =
-                    "Empty prompts aren't allowed. Please provide a non-empty prompt.";
-                return futures::future::ready(Err(anyhow::anyhow!(EMPTY_PROMPT_MSG).into()))
-                    .boxed();
-            }
-
-            // Copilot Chat has a restriction that the final message must be from the user.
-            // While their API does return an error message for this, we can catch it earlier
-            // and provide a more helpful error message.
-            if !matches!(message.role, Role::User) {
-                const USER_ROLE_MSG: &str = "The final message must be from the user. To provide a system prompt, you must provide the system prompt followed by a user prompt.";
-                return futures::future::ready(Err(anyhow::anyhow!(USER_ROLE_MSG).into())).boxed();
-            }
-        }
-
let copilot_request = match into_copilot_chat(&self.model, request) {
Ok(request) => request,
Err(err) => return futures::future::ready(Err(err.into())).boxed(),