Fork 0 — mirror of https://github.com/matrix-construct/construct (synced 2024-12-25 23:14:13 +01:00)

ircd::gpt::pipe: Add model invalidation for device resynchronization.

Commit authored by Jason Volk on 2021-04-22 12:11:25 -07:00
parent bd460a80ce
commit 2a3c54afa2
4 changed files with 28 additions and 2 deletions

View file

@ -29,7 +29,9 @@ struct ircd::gpt::pipe::exec
const_buffer
send_opts, // Set when sending the options page.
send_ctrl; // Set when sending the control page.
send_ctrl, // Set when sending the control page.
send_coil, // Set when sending the updated model.
send_head; // Set when sending the updated model.
mutable_buffer
recv_ctrl; // Set when receiving the control page.
@ -46,6 +48,8 @@ struct ircd::gpt::pipe::exec
cl::exec
release_opts, // Release the options page.
release_ctrl, // Release the control page.
release_coil, // Release updates to the model.
release_head, // Release updates to the model.
lm_embed, // Compute token and positional embeddings.
coil[12 * 2], // Pass over all layers.
lm_norm, // Final normalization.

View file

@ -26,6 +26,7 @@ struct ircd::gpt::pipe::model
std::unique_ptr<model::decoder> decode;
std::unique_ptr<model::language> embed;
bool invalid {false};
model(const gpt::model::decoder &, const gpt::model::embed &);
model(gpt::model::decoder &, gpt::model::embed &);

View file

@ -161,7 +161,7 @@ ircd::gpt::model::init_from_cache(const string_view &cache_path)
const fs::fd fd
{
cache_path, std::ios::in | std::ios::out
cache_path, std::ios::in //| std::ios::out
};
fs::map::opts map_opts
@ -171,6 +171,7 @@ ircd::gpt::model::init_from_cache(const string_view &cache_path)
map_opts.huge2mb = true;
map_opts.locked = false;
map_opts.shared = false;
default_model_shm = fs::map
{
fd, map_opts, sizeof(decoder)

View file

@ -260,6 +260,16 @@ ircd::gpt::pipe::exec::exec(task &task,
reinterpret_cast<const char *>(task.ctrl),
release? sizeof(struct ircd_gpt_task): 0
}
,send_coil
{
reinterpret_cast<const char *>(gpt::model::default_model),
release && desc->model->invalid? (sizeof(gpt::model::block) * 12 + sizeof(gpt::model::norm)): 0
}
,send_head
{
reinterpret_cast<const char *>(&gpt::model::default_model->word),
release && desc->model->invalid? sizeof(gpt::model::embed): 0
}
,recv_ctrl
{
reinterpret_cast<char *>(task.ctrl),
@ -309,6 +319,14 @@ ircd::gpt::pipe::exec::exec(task &task,
{
desc->ctrl, send_ctrl
}
,release_coil
{
desc->model->decode->master[0], send_coil
}
,release_head
{
desc->model->embed->master[0], send_head
}
,lm_embed
{
desc->lm_embed, range_lm_embed, anode_opts
@ -361,6 +379,8 @@ ircd::gpt::pipe::exec::exec(task &task,
desc->ctrl, recv_ctrl
}
{
if(release && desc->model->invalid)
desc->model->invalid = false;
}
ircd::gpt::pipe::exec::~exec()