Generate text based on the given text prompt.
import os

from friendli import SyncFriendli

# Open the client as a context manager so the underlying HTTP session
# is released automatically when the block exits.
with SyncFriendli(
    # Read the API token from the environment; falls back to "" if unset.
    token=os.getenv("FRIENDLI_TOKEN", ""),
) as client:
    # Issue a single, non-streaming completion request against the
    # dedicated endpoint identified by "model".
    response = client.dedicated.completions.complete(
        dedicated_completions_body={
            "model": "(endpoint-id)",
            "stream": False,
            "prompt": "Say this is a test!",
        }
    )

    # Handle response
    print(response)
| Parameter                    | Type                              | Required | Description                                                         |
| ---------------------------- | --------------------------------- | -------- | ------------------------------------------------------------------- |
| `dedicated_completions_body` | `models.DedicatedCompletionsBody` | ✔️        | N/A                                                                 |
| `x_friendli_team`            | `OptionalNullable[str]`           | ➖        | ID of team to run requests as (optional parameter).                 |
| `retries`                    | `Optional[utils.RetryConfig]`     | ➖        | Configuration to override the default retry behavior of the client. |
**Response:** `models.ContainerCompletionsSuccess`

| Error Type      | Status Code | Content Type |
| --------------- | ----------- | ------------ |
| models.SDKError | 4XX, 5XX    | \*/\*        |
Generate text based on the given text prompt.
import os

from friendli import SyncFriendli

# Open the client as a context manager so the underlying HTTP session
# is released automatically when the block exits.
with SyncFriendli(
    # Read the API token from the environment; falls back to "" if unset.
    token=os.getenv("FRIENDLI_TOKEN", ""),
) as client:
    # Start a streaming completion request against the dedicated
    # endpoint identified by "model".
    response = client.dedicated.completions.stream(
        dedicated_completions_stream_body={
            "model": "(endpoint-id)",
            "stream": True,
            "prompt": "Say this is a test!",
        }
    )

    # The response is itself a context manager yielding server-sent
    # events; iterate as they arrive and close the stream on exit.
    with response as event_stream:
        for event in event_stream:
            # handle event
            print(event, flush=True)
**Response:** `Union[eventstreaming.EventStream[models.ContainerCompletionsStreamSuccess], eventstreaming.EventStreamAsync[models.ContainerCompletionsStreamSuccess]]`

| Error Type      | Status Code | Content Type |
| --------------- | ----------- | ------------ |
| models.SDKError | 4XX, 5XX    | \*/\*        |