Basedformer · commit 67259a5b
Authored Apr 06, 2022 by novelailab
Parent commit: 02d93202

    change name

Showing 13 changed files with 25 additions and 26 deletions (+25 −26)
basedformer/__init__.py     +0  −0
basedformer/embeddings.py   +0  −0
basedformer/gptj.py         +0  −0
basedformer/hypernet.py     +0  −0
basedformer/lm_base.py      +3  −5
basedformer/optimizer.py    +0  −0
basedformer/presets.py      +0  −0
basedformer/utils.py        +0  −0
hypertrain.py               +4  −17
scripts/comparehf.py        +2  −2
scripts/cudagraph.py        +1  −1
setup.py                    +14 −0
train.py                    +1  −1
lm_arch/__init__.py → basedformer/__init__.py (file moved)
lm_arch/embeddings.py → basedformer/embeddings.py (file moved)
lm_arch/gptj.py → basedformer/gptj.py (file moved)
lm_arch/hypernet.py → basedformer/hypernet.py (file moved)
lm_arch/lm_base.py → basedformer/lm_base.py

-from lm_arch import utils
+from basedformer import utils
 import math
 import torch
 from torch import nn
-from lm_arch import gptj
+from basedformer import gptj
 import os

 #Having common BaseLM functionality in this class instead of the torch LM itself makes sense.
...
@@ -77,9 +77,7 @@ def load_gpt_j(path="models/6b", state_dict=None):
         "n_head": 16,
         "hidden_dim": 4096,
         "vocab_dim": 50400,
-        "eps": 1e-5,
-        "activation": gptj.gelu_new,
-        "Layer": gptj.GPTJLayer
+        "eps": 1e-5
     }
     model = BaseLM.load(gptj.GPTJModel, config, path, state_dict)
     return model
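With the rename in place, the GPT-J loader is imported from basedformer rather than lm_arch. A minimal usage sketch against the load_gpt_j signature shown above ("models/6b" is just the function's default path; the eval() call is an illustrative assumption, not code from this repository):

from basedformer import lm_base

# load_gpt_j assembles the GPT-J config shown in the hunk above and passes
# it, together with the checkpoint path, to BaseLM.load.
model = lm_base.load_gpt_j(path="models/6b")
model.eval()  # assuming the returned model is a torch.nn.Module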
lm_arch/optimizer.py → basedformer/optimizer.py (file moved)
lm_arch/presets.py → basedformer/presets.py (file moved)
lm_arch/utils.py → basedformer/utils.py (file moved)
hypertrain.py

...
@@ -3,7 +3,6 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from pathlib import Path
-from lm_train import utils
 from torch.utils import data
 import math
 import sys
...
@@ -13,17 +12,18 @@ import wandb
 import numpy as np
 from torch.utils.checkpoint import checkpoint as ck
 from math import log2, ceil
-from lm_arch import gptj, lm_base, optimizer
-from lm_arch import util
+from basedformer import gptj, lm_base, optimizer
+from basedformer.utils import *

 def _init_weights(module):
     """Initialize the weights."""
     if isinstance(module, nn.Linear):
         module.weight.data.normal_(mean=0.0, std=0.02)
         if module.bias is not None:
             module.bias.data.zero_()
     elif isinstance(module, nn.Embedding):
         module.weight.data.normal_(mean=0.0, std=0.02)
     elif isinstance(module, nn.LayerNorm):
         module.bias.data.zero_()
         module.weight.data.fill_(1.0)
...
@@ -145,25 +145,12 @@ class HyperNetworkSingle(nn.Module):
         x = x.mul(torch.sigmoid(x))
         return x.bfloat16()

-model_config = {"n_layer": 12, "n_head": 12, "hidden_dim": 768, "vocab_dim": 50400, "eps": 1e-5, "activation": gelu_new, "Layer": GPTLayer}
+model_config = {"n_layer": 28, "n_head": 16, "hidden_dim": 4096, "vocab_dim": 50400, "eps": 1e-5, "activation": gelu_new, "Layer": GPTLayer}
 # we need 250 batch size to train the small GPT.
...
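For context, _init_weights above is the standard GPT-2-style initializer: normal(0, 0.02) for Linear and Embedding weights, zeroed biases, and unit gain for LayerNorm. Such a hook is conventionally attached with nn.Module.apply, which visits every submodule recursively. A self-contained sketch (the toy model and the apply call are illustrative assumptions, since the call site is not visible in this diff):

import torch.nn as nn

def _init_weights(module):
    """GPT-style init, mirroring the hypertrain.py function above."""
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=0.02)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=0.02)
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)

# nn.Module.apply walks the module tree and calls the hook on each submodule,
# so a single call re-initializes the whole network.
toy = nn.Sequential(nn.Embedding(50400, 768), nn.Linear(768, 768), nn.LayerNorm(768))
toy.apply(_init_weights)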
scripts/comparehf.py

-from lm_arch import lm_base
-from lm_arch.utils import *
+from basedformer import lm_base
+from basedformer.utils import *
 import time
 import torch
...
scripts/cudagraph.py

...
@@ -5,7 +5,7 @@ from time import perf_counter, perf_counter_ns
 import numpy as np
 from tqdm import tqdm
 from contextlib import contextmanager
-from lm_arch.hypernet import *
+from basedformer.hypernet import *
 import sys

 #replicating timeit magic function of ipython
 def timeit(func, r=1, n=5, quiet=False, function=None, do_tqdm=False, first=True):
...
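The timeit helper replicates IPython's %timeit magic, running r rounds of n calls each, optionally wrapped in tqdm. Its body is collapsed in this view, so the following is a hedged usage sketch against the signature alone; the exact reporting behavior is not shown in the diff:

import torch

# Assumes timeit from scripts/cudagraph.py is in scope (e.g. within the
# script's own namespace). Times 3 rounds of 5 matmul calls each.
x = torch.randn(1024, 1024)
timeit(lambda: x @ x, r=3, n=5)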
setup.py (new file, mode 100644)

+import setuptools
+
+setuptools.setup(
+    name="basedformer",
+    version="0.1",
+    author="Eren Doğan",
+    description="Modular and minimal transformer codebase for experimentation.",
+    packages=setuptools.find_packages(),
+    include_package_data=True,
+    python_requires='>=3.7',
+    package_data={'basedformer': ['*.json']},
+    install_requires=['dotmap', 'numpy']
+)
\ No newline at end of file
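The new setup.py is what turns the loose lm_arch directory into an installable basedformer package, so the import changes in the other files resolve. After an editable install (pip install -e . from the repository root), a quick smoke test might look like this; the module names come from the diffs above:

# Verify that the renamed package and its submodules resolve.
import basedformer
from basedformer import gptj, lm_base, optimizer, utils

print(gptj.__name__, lm_base.__name__)  # basedformer.gptj basedformer.lm_base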
train.py

...
@@ -6,7 +6,7 @@ import torch.optim as optim
 from pathlib import Path
 from lm_train import utils
 from torch.utils import data
-from lm_arch import lm_base, optimizer
+from basedformer import lm_base, optimizer
 import yaml
 import sys
 from tqdm import tqdm
...