novelai-storage / Hydra Node Http
Commit 548f5aaa authored Jul 29, 2022 by novelailab
load config and model

parent 7c07f8b2
Showing 2 changed files with 97 additions and 1 deletion (+97 -1)

hydra_node/config.py   +91 -0
main.py                +6  -1
hydra_node/config.py  0 → 100644 (new file)
import logging
import os
import platform
import socket
import sys
import time

import sentry_sdk
import torch
from dotmap import DotMap
from icecream import ic
from sentry_sdk import capture_exception
from sentry_sdk.integrations.threading import ThreadingIntegration


def init_config_model():
    config = DotMap()
    config.model_type = "GPT"

    is_dev = ""
    environment = "production"
    if os.environ['DEV'] == "True":
        is_dev = "_dev"
        environment = "staging"
    config.is_dev = is_dev

    # So we know all about the errors.
    sentry_url = os.getenv("SENTRY_URL")
    sentry_sdk.init(
        sentry_url,
        server_name=socket.gethostname(),
        traces_sample_rate=0.002,
        environment=environment,
        integrations=[ThreadingIntegration(propagate_hub=True)],
    )

    # Setup logger
    logger = logging.getLogger(__name__)
    logger.setLevel(level=logging.INFO)
    fh = logging.StreamHandler()
    fh_formatter = logging.Formatter("%(asctime)s %(levelname)s %(filename)s(%(process)d) - %(message)s")
    fh.setFormatter(fh_formatter)
    logger.addHandler(fh)
    config.logger = logger

    # Gather node information
    config.cuda_dev = torch.cuda.current_device()
    cpu_id = platform.processor()
    if os.path.exists('/proc/cpuinfo'):
        cpu_id = [line for line in open("/proc/cpuinfo", 'r').readlines() if 'model name' in line][0].rstrip().split(': ')[-1]
    config.cpu_id = cpu_id
    config.gpu_id = torch.cuda.get_device_name(config.cuda_dev)
    config.node_id = platform.node()

    # Report on our CUDA memory and model.
    gb_gpu = int(torch.cuda.get_device_properties(config.cuda_dev).total_memory / (1000 * 1000 * 1000))
    logger.info(f"CPU: {config.cpu_id}")
    logger.info(f"GPU: {config.gpu_id}")
    logger.info(f"GPU RAM: {gb_gpu}gb")

    config.model_name = os.environ['MODEL']
    logger.info(f"MODEL: {config.model_name}")

    # Resolve where we get our model and data from.
    config.model_path = os.getenv('MODEL_PATH', None)

    # Misc settings
    config.model_alias = os.getenv('MODEL_ALIAS')

    # Instantiate our actual model.
    load_time = time.time()
    try:
        model = GPTModel(config)
    except Exception as e:
        ic(e)
        capture_exception(e)
        logger.error("Failed to load model, restarting.")
        sys.exit(1)
    config.model = model

    # Mark that our model is loaded.
    f = open("/tmp/health_startup", "w")
    f.close()

    time_load = time.time() - load_time
    logger.info(f"Models loaded in {time_load:.2f}s")
    return model, config
\ No newline at end of file
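For orientation, here is a hedged sketch of how the new init_config_model() could be exercised. It reads DEV, SENTRY_URL, MODEL, MODEL_PATH and MODEL_ALIAS from the environment, and it constructs GPTModel(config), which is not imported in this file, so it presumably needs to be resolvable elsewhere in the hydra_node package. The environment values below are placeholders of my own, not anything from the commit.

# Hypothetical usage sketch -- not part of this commit.
# Assumes a CUDA device is present and that GPTModel(config) can be constructed.
import os

os.environ.setdefault("DEV", "False")              # "True" switches Sentry to the "staging" environment
os.environ.setdefault("SENTRY_URL", "")            # DSN passed to sentry_sdk.init()
os.environ.setdefault("MODEL", "example-gpt")      # required; read via os.environ['MODEL']
os.environ.setdefault("MODEL_PATH", "/models/example-gpt")  # optional; defaults to None
os.environ.setdefault("MODEL_ALIAS", "example")    # optional

from hydra_node.config import init_config_model

model, config = init_config_model()
config.logger.info(f"{config.model_name} ready on {config.gpu_id} ({config.node_id})")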
main.py
from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.responses import HTMLResponse
from sentry_sdk import capture_exception
from sentry_sdk import capture_message
from sentry_sdk import start_transaction
from hydra_node.config import init_config_model

app = FastAPI()

@app.on_event("startup")
async def startup_event():
    print('Start')
    print("Startup")
    #model, config = init_config()

@app.on_event("shutdown")
async def shutdown_event():
    ...
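The startup hook currently only prints; the commented-out line hints at where model loading is meant to go (note that it calls init_config(), while the function added in this commit is named init_config_model). Below is a minimal sketch of one way that wiring could look, keeping the model and config on app.state so request handlers can reach them; the use of app.state is my own choice, not something in this commit.

# Illustrative sketch only -- not part of this commit.
from fastapi import FastAPI
from hydra_node.config import init_config_model

app = FastAPI()

@app.on_event("startup")
async def startup_event():
    # Load the model once at process startup and keep it next to its config.
    model, config = init_config_model()
    app.state.model = model
    app.state.config = config
    config.logger.info("Startup complete")

@app.on_event("shutdown")
async def shutdown_event():
    # Nothing to tear down yet; placeholder mirroring the commit.
    ...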