diff --git a/Gemfile b/Gemfile
index 2524d8da9601..1cd2ef35f895 100644
--- a/Gemfile
+++ b/Gemfile
@@ -3,17 +3,10 @@ source "https://rubygems.org"
 # gemspec
+gem "beautiful-jekyll-theme", "6.0.1"
 gem "ffi", "= 1.16.3"
 gem 'jemoji'
 gem 'tzinfo'
 gem 'tzinfo-data', platforms: [:mingw, :mswin, :x64_mingw]
 gem 'wdm', '>= 0.1.0'
-gem 'eventmachine', '1.2.7', git: 'https://github.com/eventmachine/eventmachine.git', tag: 'v1.2.7'
-gem "jekyll-remote-theme"
-gem 'rubyzip', '2.3.0'
-gem 'sassc' #, '2.4.0'
-gem "beautiful-jekyll-theme", "6.0.1"
-gem 'html-pipeline'
-gem 'selma', '~> 0.4.12'
-gem 'zeitwerk', '~> 2.7', '>= 2.7.1'
-gem "rouge"
\ No newline at end of file
+gem 'eventmachine', '1.2.7', git: 'https://github.com/eventmachine/eventmachine.git', tag: 'v1.2.7'
\ No newline at end of file
diff --git a/_posts/Coding/25-02-08-DDP.md b/_posts/Coding/25-02-08-DDP.md
index 14162a83ed40..f7048780a900 100644
--- a/_posts/Coding/25-02-08-DDP.md
+++ b/_posts/Coding/25-02-08-DDP.md
@@ -108,28 +108,20 @@ sidebar:
 #### DDP Initialization
 Based on this, let's now look at how to set it up in Python code. Everyone has their own coding style, but I usually write it as follows.
-{% highlight python %}
-args.device = 'cuda:0'
-args.world_size = 1
-args.rank = 0
-args.local_rank = int(os.environ.get("LOCAL_RANK", 0))
-torch.cuda.set_device(args.local_rank)
-torch.distributed.init_process_group(backend='nccl', init_method='env://')
-args.world_size = torch.distributed.get_world_size()
-args.local_rank = torch.distributed.get_rank()
-{% endhighlight %}
+
-args.device = 'cuda:0'
-args.world_size = 1
-args.rank = 0
-args.local_rank = int(os.environ.get("LOCAL_RANK", 0))
+args.device = 'cuda:0'
+args.world_size = 1
+args.rank = 0
+args.local_rank = int(os.environ.get("LOCAL_RANK", 0))
 torch.cuda.set_device(args.local_rank)
-torch.distributed.init_process_group(backend='nccl', init_method='env://')
+torch.distributed.init_process_group(backend='nccl', init_method='env://')
 args.world_size = torch.distributed.get_world_size()
 args.local_rank = torch.distributed.get_rank()
 
+
 ---
@@ -149,18 +141,19 @@ args.local_rank = torch.distributed.get_rank()
 #### Applying DDP to the Model
-Next, let's look at how to apply DDP to the model and train it.. (It's very simple!!)
+Next, let's look at how to apply DDP to the model and train it. (It's very simple!)
-from torch.nn.parallel import DistributedDataParallel as DDP\\
-model = DDP(model,device_ids=[args.local_rank])\\
+from torch.nn.parallel import DistributedDataParallel as DDP
+model = DDP(model,device_ids=[args.local_rank])
 ...
 logits = model(x)
 loss = loss_fn(logits, labels)
 loss.backward() 
 
+
 ---
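
For context, the snippets in this diff assume the script is launched with a tool such as `torchrun`, which sets `LOCAL_RANK`, `RANK`, `WORLD_SIZE`, `MASTER_ADDR`, and `MASTER_PORT` so that `init_method='env://'` can read them. Note also that `torch.distributed.get_rank()` returns the global rank, which coincides with the local rank only in single-node runs. Below is a minimal, self-contained sketch of the same DDP setup and training loop; the toy model, dataset, hyperparameters, and script name are hypothetical placeholders, not code from the post.

```python
# Minimal DDP sketch. Assumed launch command (hypothetical script name):
#   torchrun --nproc_per_node=<num_gpus> train_ddp.py
import os

import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler


def main():
    # torchrun exports LOCAL_RANK/RANK/WORLD_SIZE; 'env://' reads them.
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend="nccl", init_method="env://")

    # Toy model, placed on this process's GPU and wrapped in DDP.
    model = torch.nn.Linear(10, 2).cuda(local_rank)
    model = DDP(model, device_ids=[local_rank])

    # Toy dataset; DistributedSampler gives each rank a disjoint shard.
    dataset = TensorDataset(torch.randn(128, 10), torch.randint(0, 2, (128,)))
    sampler = DistributedSampler(dataset)
    loader = DataLoader(dataset, batch_size=16, sampler=sampler)

    loss_fn = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    for epoch in range(2):
        sampler.set_epoch(epoch)  # reshuffle the shards each epoch
        for x, labels in loader:
            x, labels = x.cuda(local_rank), labels.cuda(local_rank)
            logits = model(x)
            loss = loss_fn(logits, labels)
            optimizer.zero_grad()
            loss.backward()  # DDP all-reduces gradients across ranks here
            optimizer.step()

    dist.destroy_process_group()


if __name__ == "__main__":
    main()
```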